Posted to commits@hive.apache.org by se...@apache.org on 2015/09/18 22:35:06 UTC

[01/41] hive git commit: HIVE-5623: ORC accessing array column that's empty will fail with Java out-of-bounds exception (Prasanth Jayachandran reviewed by Ashutosh Chauhan)

Repository: hive
Updated Branches:
  refs/heads/llap 79c70311f -> f324305a7


HIVE-5623: ORC accessing array column that's empty will fail with Java out-of-bounds exception (Prasanth Jayachandran reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0bc96772
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0bc96772
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0bc96772

Branch: refs/heads/llap
Commit: 0bc96772e440653815792d3e37db3e9063e6c2e1
Parents: da0be3d
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Mon Sep 14 16:06:18 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Mon Sep 14 16:06:18 2015 -0500

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java     | 2 +-
 ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0bc96772/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
index b1a32bc..7a17b92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcStruct.java
@@ -435,7 +435,7 @@ final public class OrcStruct implements Writable {
 
     @Override
     public Object getListElement(Object list, int i) {
-      if (list == null) {
+      if (list == null || i < 0 || i >= getListLength(list)) {
         return null;
       }
       return ((List) list).get(i);

http://git-wip-us.apache.org/repos/asf/hive/blob/0bc96772/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java
index 8fc0693..2e431c8 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcStruct.java
@@ -94,6 +94,8 @@ public class TestOrcStruct {
         inspector.getAllStructFieldRefs().get(12).getFieldObjectInspector();
     assertEquals(ObjectInspector.Category.LIST, listOI.getCategory());
     assertEquals(10, listOI.getListElement(list, 10));
+    assertEquals(null, listOI.getListElement(list, -1));
+    assertEquals(null, listOI.getListElement(list, 13));
     assertEquals(13, listOI.getListLength(list));
 
     Map<Integer, Integer> map = new HashMap<Integer,Integer>();
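
For illustration, a minimal standalone sketch (plain Java, not the Hive ObjectInspector) of the guard introduced above: a null list, a negative index, or an index past the end yields null instead of an IndexOutOfBoundsException.

import java.util.Arrays;
import java.util.List;

public class SafeListAccessSketch {
  // Mirrors the bounds check added to OrcStruct's list inspector.
  static Object getListElement(List<?> list, int i) {
    if (list == null || i < 0 || i >= list.size()) {
      return null;
    }
    return list.get(i);
  }

  public static void main(String[] args) {
    List<Integer> list = Arrays.asList(0, 1, 2);
    System.out.println(getListElement(list, 1));   // 1
    System.out.println(getListElement(list, -1));  // null
    System.out.println(getListElement(list, 13));  // null
    System.out.println(getListElement(null, 0));   // null
  }
}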


[30/41] hive git commit: HIVE-11842: Improve RuleRegExp by caching some internal data structures (Jesus Camacho Rodriguez, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11842: Improve RuleRegExp by caching some internal data structures (Jesus Camacho Rodriguez, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/79244ab4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/79244ab4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/79244ab4

Branch: refs/heads/llap
Commit: 79244ab453823b8787b70a08f923e25c2abbd0bf
Parents: 8d524e0
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Sep 17 17:46:55 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Sep 17 17:46:55 2015 +0100

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/lib/RuleRegExp.java   | 61 ++++++++++++++++----
 1 file changed, 51 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/79244ab4/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
index fd5f133..1e850d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/RuleRegExp.java
@@ -19,7 +19,9 @@
 package org.apache.hadoop.hive.ql.lib;
 
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 import java.util.regex.Matcher;
@@ -125,6 +127,12 @@ public class RuleRegExp implements Rule {
    */
   private int costPatternWithoutWildCardChar(Stack<Node> stack) throws SemanticException {
     int numElems = (stack != null ? stack.size() : 0);
+
+    // No elements
+    if (numElems == 0) {
+      return -1;
+    }
+
     int patLen = patternWithoutWildCardChar.length();
     StringBuilder name = new StringBuilder(patLen + numElems);
     for (int pos = numElems - 1; pos >= 0; pos--) {
@@ -133,9 +141,8 @@ public class RuleRegExp implements Rule {
       if (name.length() >= patLen) {
         if (patternWithoutWildCardChar.contentEquals(name)) {
           return patLen;
-        } else {
-          return -1;
         }
+        break;
       }
     }
     return -1;
@@ -152,20 +159,54 @@ public class RuleRegExp implements Rule {
    */
   private int costPatternWithORWildCardChar(Stack<Node> stack) throws SemanticException {
     int numElems = (stack != null ? stack.size() : 0);
+
+    // No elements
+    if (numElems == 0) {
+      return -1;
+    }
+
+    // These data structures are used to cache previously created Strings
+    Map<Integer,String> cachedNames = new HashMap<Integer,String>();
+    int maxDepth = numElems;
+    int maxLength = 0;
+
+    // For every pattern
     for (String pattern : patternORWildChar) {
       int patLen = pattern.length();
 
-      StringBuilder name = new StringBuilder(patLen + numElems);
-      for (int pos = numElems - 1; pos >= 0; pos--) {
-        String nodeName = stack.get(pos).getName() + "%";
-        name.insert(0, nodeName);
-        if (name.length() >= patLen) {
-          if (pattern.contentEquals(name)) {
-            return patLen;
-          } else {
+      // If the stack has already been explored to that level,
+      // use the cached String
+      if (cachedNames.containsKey(patLen)) {
+        if (pattern.contentEquals(cachedNames.get(patLen))) {
+          return patLen;
+        }
+      } else if (maxLength >= patLen) {
+        // We have already explored the stack deep enough, but
+        // we do not have a match
+        continue;
+      } else {
+        // We are going to build the name
+        StringBuilder name = new StringBuilder(patLen + numElems);
+        if (maxLength != 0) {
+          name.append(cachedNames.get(maxLength));
+        }
+        for (int pos = maxDepth - 1; pos >= 0; pos--) {
+          String nodeName = stack.get(pos).getName() + "%";
+          name.insert(0, nodeName);
+
+          // We cache the values
+          cachedNames.put(name.length(), name.toString());
+          maxLength = name.length();
+          maxDepth--;
+
+          if (name.length() >= patLen) {
+            if (pattern.contentEquals(name)) {
+              return patLen;
+            }
             break;
           }
         }
+        
       }
     }
     return -1;
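
For illustration, a minimal standalone sketch of the caching idea above (assumptions: plain strings stand in for the Node stack; this is not the Hive RuleRegExp class). The concatenated stack name is cached by accumulated length, so each pattern either reuses a cached name of exactly its length or extends the deepest name built so far instead of rebuilding it from scratch.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CachedNameMatchSketch {
  // Returns the length of the first matching pattern, or -1 if none matches.
  static int costPattern(List<String> stackNames, List<String> patterns) {
    Map<Integer, String> cachedNames = new HashMap<Integer, String>();
    int maxDepth = stackNames.size();
    int maxLength = 0;
    for (String pattern : patterns) {
      int patLen = pattern.length();
      if (cachedNames.containsKey(patLen)) {
        // A name of exactly this length was built before: compare directly.
        if (pattern.contentEquals(cachedNames.get(patLen))) {
          return patLen;
        }
      } else if (maxLength >= patLen) {
        // The stack was already explored deep enough and no name of this length exists.
        continue;
      } else {
        // Extend the deepest cached name rather than starting over.
        StringBuilder name = new StringBuilder(patLen + maxDepth);
        if (maxLength != 0) {
          name.append(cachedNames.get(maxLength));
        }
        for (int pos = maxDepth - 1; pos >= 0; pos--) {
          name.insert(0, stackNames.get(pos) + "%");
          cachedNames.put(name.length(), name.toString());
          maxLength = name.length();
          maxDepth--;
          if (name.length() >= patLen) {
            if (pattern.contentEquals(name)) {
              return patLen;
            }
            break;
          }
        }
      }
    }
    return -1;
  }

  public static void main(String[] args) {
    List<String> stack = Arrays.asList("TS", "FIL", "SEL");
    System.out.println(costPattern(stack, Arrays.asList("SEL%", "FIL%SEL%"))); // 4
  }
}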


[27/41] hive git commit: HIVE-11816 : Upgrade groovy to 2.4.4 (Szehon, reviewed by Xuefu)

Posted by se...@apache.org.
HIVE-11816 : Upgrade groovy to 2.4.4 (Szehon, reviewed by Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57158da6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57158da6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57158da6

Branch: refs/heads/llap
Commit: 57158da63d6ab2b96b5c5b2ed8cba75d46467777
Parents: ce71355
Author: Szehon Ho <sz...@cloudera.com>
Authored: Wed Sep 16 11:24:59 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Wed Sep 16 11:24:59 2015 -0700

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/57158da6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 342224d..2a83070 100644
--- a/pom.xml
+++ b/pom.xml
@@ -121,7 +121,7 @@
     <derby.version>10.10.2.0</derby.version>
     <dropwizard.version>3.1.0</dropwizard.version>
     <guava.version>14.0.1</guava.version>
-    <groovy.version>2.1.6</groovy.version>
+    <groovy.version>2.4.4</groovy.version>
     <hadoop-20S.version>1.2.1</hadoop-20S.version>
     <hadoop-23.version>2.6.0</hadoop-23.version>
     <hadoop.bin.path>${basedir}/${hive.path.to.root}/testutils/hadoop</hadoop.bin.path>


[28/41] hive git commit: HIVE-11839: Vectorization wrong results with filter of (CAST AS CHAR) (Matt McCline reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11839: Vectorization wrong results with filter of (CAST AS CHAR) (Matt McCline reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7be02aec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7be02aec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7be02aec

Branch: refs/heads/llap
Commit: 7be02aec7321d87889c0c06cff040d5fa2200e64
Parents: 57158da
Author: Matt McCline <mm...@hortonworks.com>
Authored: Wed Sep 16 14:55:12 2015 -0700
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Wed Sep 16 14:55:12 2015 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  1 +
 .../ql/exec/vector/VectorizationContext.java    | 10 +++---
 .../queries/clientpositive/vector_char_cast.q   |  9 +++++
 .../clientpositive/tez/vector_char_cast.q.out   | 35 ++++++++++++++++++++
 .../clientpositive/vector_char_cast.q.out       | 35 ++++++++++++++++++++
 5 files changed, 85 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7be02aec/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 2851720..0d3e1cc 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -192,6 +192,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\
   vector_between_in.q,\
   vector_binary_join_groupby.q,\
   vector_bucket.q,\
+  vector_char_cast.q,\
   vector_cast_constant.q,\
   vector_char_2.q,\
   vector_char_4.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/7be02aec/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index 34f9329..2483196 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -1528,7 +1528,7 @@ public class VectorizationContext {
       // Boolean must come before the integer family. It's a special case.
       return createVectorExpression(CastBooleanToStringViaLongToString.class, childExpr, Mode.PROJECTION, null);
     } else if (isIntFamily(inputType)) {
-      return createVectorExpression(CastLongToString.class, childExpr, Mode.PROJECTION, null);
+      return createVectorExpression(CastLongToString.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDecimalFamily(inputType)) {
       return createVectorExpression(CastDecimalToString.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDateFamily(inputType)) {
@@ -1554,9 +1554,9 @@ public class VectorizationContext {
     }
     if (inputType.equals("boolean")) {
       // Boolean must come before the integer family. It's a special case.
-      return createVectorExpression(CastBooleanToCharViaLongToChar.class, childExpr, Mode.PROJECTION, null);
+      return createVectorExpression(CastBooleanToCharViaLongToChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isIntFamily(inputType)) {
-      return createVectorExpression(CastLongToChar.class, childExpr, Mode.PROJECTION, null);
+      return createVectorExpression(CastLongToChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDecimalFamily(inputType)) {
       return createVectorExpression(CastDecimalToChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDateFamily(inputType)) {
@@ -1583,9 +1583,9 @@ public class VectorizationContext {
     }
     if (inputType.equals("boolean")) {
       // Boolean must come before the integer family. It's a special case.
-      return createVectorExpression(CastBooleanToVarCharViaLongToVarChar.class, childExpr, Mode.PROJECTION, null);
+      return createVectorExpression(CastBooleanToVarCharViaLongToVarChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isIntFamily(inputType)) {
-      return createVectorExpression(CastLongToVarChar.class, childExpr, Mode.PROJECTION, null);
+      return createVectorExpression(CastLongToVarChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDecimalFamily(inputType)) {
       return createVectorExpression(CastDecimalToVarChar.class, childExpr, Mode.PROJECTION, returnType);
     } else if (isDateFamily(inputType)) {
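
For illustration, a minimal sketch of why the returnType has to be passed instead of null (plain Java; the helper below is hypothetical, not Hive's CastLongToChar): a CHAR(n) output carries a maximum length, and without the declared output type the vectorized cast cannot enforce it, so a predicate such as cast(id as char(4)) = '1000' can disagree with the non-vectorized result.

public class CharCastSketch {
  // Hypothetical helper: apply a CHAR(maxLength) constraint to a cast long value.
  static String castLongToChar(long value, int maxLength) {
    String s = Long.toString(value);
    return s.length() > maxLength ? s.substring(0, maxLength) : s;
  }

  public static void main(String[] args) {
    System.out.println(castLongToChar(1000L, 4));  // "1000"
    System.out.println(castLongToChar(10001L, 4)); // "1000" (truncated to char(4))
  }
}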

http://git-wip-us.apache.org/repos/asf/hive/blob/7be02aec/ql/src/test/queries/clientpositive/vector_char_cast.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_char_cast.q b/ql/src/test/queries/clientpositive/vector_char_cast.q
new file mode 100644
index 0000000..bc78d51
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_char_cast.q
@@ -0,0 +1,9 @@
+create table s1(id smallint) stored as orc;
+
+insert into table s1 values (1000),(1001),(1002),(1003),(1000);
+
+set hive.vectorized.execution.enabled=true;
+select count(1) from s1 where cast(id as char(4))='1000';
+
+set hive.vectorized.execution.enabled=false;
+select count(1) from s1 where cast(id as char(4))='1000';
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/7be02aec/ql/src/test/results/clientpositive/tez/vector_char_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_char_cast.q.out b/ql/src/test/results/clientpositive/tez/vector_char_cast.q.out
new file mode 100644
index 0000000..dbeae74
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_char_cast.q.out
@@ -0,0 +1,35 @@
+PREHOOK: query: create table s1(id smallint) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s1
+POSTHOOK: query: create table s1(id smallint) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s1
+PREHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@s1
+POSTHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@s1
+POSTHOOK: Lineage: s1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s1
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s1
+#### A masked pattern was here ####
+2

http://git-wip-us.apache.org/repos/asf/hive/blob/7be02aec/ql/src/test/results/clientpositive/vector_char_cast.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_cast.q.out b/ql/src/test/results/clientpositive/vector_char_cast.q.out
new file mode 100644
index 0000000..dbeae74
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_char_cast.q.out
@@ -0,0 +1,35 @@
+PREHOOK: query: create table s1(id smallint) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@s1
+POSTHOOK: query: create table s1(id smallint) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@s1
+PREHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@s1
+POSTHOOK: query: insert into table s1 values (1000),(1001),(1002),(1003),(1000)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@s1
+POSTHOOK: Lineage: s1.id EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s1
+#### A masked pattern was here ####
+2
+PREHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@s1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(1) from s1 where cast(id as char(4))='1000'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@s1
+#### A masked pattern was here ####
+2


[06/41] hive git commit: HIVE-11745 : Alter table Exchange partition with multiple partition_spec is not working (Yongzhi Chen via Szehon)

Posted by se...@apache.org.
HIVE-11745 : Alter table Exchange partition with multiple partition_spec is not working (Yongzhi Chen via Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c6895897
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c6895897
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c6895897

Branch: refs/heads/llap
Commit: c6895897bc8178546b10780c5f463353c4ee1b9a
Parents: 5548a9c
Author: Szehon Ho <sz...@cloudera.com>
Authored: Tue Sep 15 16:39:50 2015 -0700
Committer: Szehon Ho <sz...@cloudera.com>
Committed: Tue Sep 15 16:40:26 2015 -0700

----------------------------------------------------------------------
 .../hive/ql/security/FolderPermissionBase.java  |  17 +-
 .../test/resources/testconfiguration.properties |   1 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   6 +
 .../queries/clientpositive/exchgpartition2lel.q |  32 ++++
 .../clientpositive/exchgpartition2lel.q.out     | 182 +++++++++++++++++++
 5 files changed, 237 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c6895897/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
index f28edc6..d98082f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/FolderPermissionBase.java
@@ -370,7 +370,7 @@ public abstract class FolderPermissionBase {
   }
 
   @Test
-  public void testAlterPartition() throws Exception {
+  public void testPartition() throws Exception {
     String tableName = "alterpart";
     CommandProcessorResponse ret = driver.run("CREATE TABLE " + tableName + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
     Assert.assertEquals(0,ret.getResponseCode());
@@ -396,6 +396,21 @@ public abstract class FolderPermissionBase {
     for (String child : listStatus(warehouseDir + "/" + tableName + "/part1=2/part2=2/part3=2")) {
       verifyPermission(child, 1);
     }
+
+    String tableName2 = "alterpart2";
+    ret = driver.run("CREATE TABLE " + tableName2 + " (key string, value string) partitioned by (part1 int, part2 int, part3 int)");
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    assertExistence(warehouseDir + "/" + tableName2);
+    setPermission(warehouseDir + "/" + tableName2);
+    ret = driver.run("alter table " + tableName2 + " exchange partition (part1='2',part2='2',part3='2') with table " + tableName);
+    Assert.assertEquals(0,ret.getResponseCode());
+
+    //alter exchange cannot change the base table's permission
+    //alter exchange only controls the final partition folder's permission
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2", 0);
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2", 0);
+    verifyPermission(warehouseDir + "/" + tableName2 + "/part1=2/part2=2/part3=2", 1);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hive/blob/c6895897/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index bed621d..2851720 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -12,6 +12,7 @@ minimr.query.files=auto_sortmerge_join_16.q,\
   constprog_partitioner.q,\
   disable_merge_for_bucketing.q,\
   empty_dir_in_table.q,\
+  exchgpartition2lel.q,\
   external_table_with_space_in_location_path.q,\
   file_with_header_footer.q,\
   groupby2.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/c6895897/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 1840e76..a80f686 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2596,6 +2596,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           ms.dropPartition(partition.getDbName(), sourceTable.getTableName(),
             partition.getValues());
         }
+        Path destParentPath = destPath.getParent();
+        if (!wh.isDir(destParentPath)) {
+          if (!wh.mkdirs(destParentPath, true)) {
+              throw new MetaException("Unable to create path " + destParentPath);
+          }
+        }
         /**
          * TODO: Use the hard link feature of hdfs
          * once https://issues.apache.org/jira/browse/HDFS-3370 is done
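
For illustration, a minimal sketch of the "ensure the destination parent exists" step added above (plain org.apache.hadoop.fs calls; the Warehouse wrapper used in the patch is not shown). With multi-level partition specs such as (d1=1, d2=1, d3=1), the intermediate directories may not exist yet under the destination table, so they are created before the partition data is moved.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EnsureParentSketch {
  // Create the parent of the destination partition path if it is missing,
  // so the subsequent move of the partition directory can succeed.
  static void ensureParentExists(FileSystem fs, Path destPath) throws IOException {
    Path destParentPath = destPath.getParent();
    if (!fs.exists(destParentPath) && !fs.mkdirs(destParentPath)) {
      throw new IOException("Unable to create path " + destParentPath);
    }
  }
}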

http://git-wip-us.apache.org/repos/asf/hive/blob/c6895897/ql/src/test/queries/clientpositive/exchgpartition2lel.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/exchgpartition2lel.q b/ql/src/test/queries/clientpositive/exchgpartition2lel.q
new file mode 100644
index 0000000..2b15894
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/exchgpartition2lel.q
@@ -0,0 +1,32 @@
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+
+CREATE TABLE t1 (a int) PARTITIONED BY (d1 int);
+CREATE TABLE t2 (a int) PARTITIONED BY (d1 int);
+CREATE TABLE t3 (a int) PARTITIONED BY (d1 int, d2 int);
+CREATE TABLE t4 (a int) PARTITIONED BY (d1 int, d2 int);
+CREATE TABLE t5 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
+CREATE TABLE t6 (a int) PARTITIONED BY (d1 int, d2 int, d3 int);
+
+INSERT OVERWRITE TABLE t1 PARTITION (d1 = 1) SELECT key FROM src where key = 100 limit 1;
+INSERT OVERWRITE TABLE t3 PARTITION (d1 = 1, d2 = 1) SELECT key FROM src where key = 100 limit 1;
+INSERT OVERWRITE TABLE t5 PARTITION (d1 = 1, d2 = 1, d3=1) SELECT key FROM src where key = 100 limit 1;
+
+SELECT * FROM t1;
+
+SELECT * FROM t3;
+
+ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1;
+SELECT * FROM t1;
+SELECT * FROM t2;
+
+ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3;
+SELECT * FROM t3;
+SELECT * FROM t4;
+
+ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5;
+SELECT * FROM t5;
+SELECT * FROM t6;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/c6895897/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/exchgpartition2lel.q.out b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
new file mode 100644
index 0000000..5997d6b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/exchgpartition2lel.q.out
@@ -0,0 +1,182 @@
+PREHOOK: query: DROP TABLE IF EXISTS t1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS t1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS t2
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS t2
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS t3
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS t3
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: DROP TABLE IF EXISTS t4
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS t4
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE t1 (a int) PARTITIONED BY (d1 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t1
+POSTHOOK: query: CREATE TABLE t1 (a int) PARTITIONED BY (d1 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t1
+PREHOOK: query: CREATE TABLE t2 (a int) PARTITIONED BY (d1 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t2
+POSTHOOK: query: CREATE TABLE t2 (a int) PARTITIONED BY (d1 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t2
+PREHOOK: query: CREATE TABLE t3 (a int) PARTITIONED BY (d1 int, d2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t3
+POSTHOOK: query: CREATE TABLE t3 (a int) PARTITIONED BY (d1 int, d2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t3
+PREHOOK: query: CREATE TABLE t4 (a int) PARTITIONED BY (d1 int, d2 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t4
+POSTHOOK: query: CREATE TABLE t4 (a int) PARTITIONED BY (d1 int, d2 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t4
+PREHOOK: query: CREATE TABLE t5 (a int) PARTITIONED BY (d1 int, d2 int, d3 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t5
+POSTHOOK: query: CREATE TABLE t5 (a int) PARTITIONED BY (d1 int, d2 int, d3 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t5
+PREHOOK: query: CREATE TABLE t6 (a int) PARTITIONED BY (d1 int, d2 int, d3 int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@t6
+POSTHOOK: query: CREATE TABLE t6 (a int) PARTITIONED BY (d1 int, d2 int, d3 int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@t6
+PREHOOK: query: INSERT OVERWRITE TABLE t1 PARTITION (d1 = 1) SELECT key FROM src where key = 100 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t1@d1=1
+POSTHOOK: query: INSERT OVERWRITE TABLE t1 PARTITION (d1 = 1) SELECT key FROM src where key = 100 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t1@d1=1
+POSTHOOK: Lineage: t1 PARTITION(d1=1).a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE t3 PARTITION (d1 = 1, d2 = 1) SELECT key FROM src where key = 100 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t3@d1=1/d2=1
+POSTHOOK: query: INSERT OVERWRITE TABLE t3 PARTITION (d1 = 1, d2 = 1) SELECT key FROM src where key = 100 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t3@d1=1/d2=1
+POSTHOOK: Lineage: t3 PARTITION(d1=1,d2=1).a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: INSERT OVERWRITE TABLE t5 PARTITION (d1 = 1, d2 = 1, d3=1) SELECT key FROM src where key = 100 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@t5@d1=1/d2=1/d3=1
+POSTHOOK: query: INSERT OVERWRITE TABLE t5 PARTITION (d1 = 1, d2 = 1, d3=1) SELECT key FROM src where key = 100 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@t5@d1=1/d2=1/d3=1
+POSTHOOK: Lineage: t5 PARTITION(d1=1,d2=1,d3=1).a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+PREHOOK: query: SELECT * FROM t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@d1=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@d1=1
+#### A masked pattern was here ####
+100	1
+PREHOOK: query: SELECT * FROM t3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+PREHOOK: Input: default@t3@d1=1/d2=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+POSTHOOK: Input: default@t3@d1=1/d2=1
+#### A masked pattern was here ####
+100	1	1
+PREHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
+PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: query: ALTER TABLE t2 EXCHANGE PARTITION (d1 = 1) WITH TABLE t1
+POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: query: SELECT * FROM t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+PREHOOK: query: SELECT * FROM t2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@d1=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@d1=1
+#### A masked pattern was here ####
+100	1
+PREHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
+PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: query: ALTER TABLE t4 EXCHANGE PARTITION (d1 = 1, d2 = 1) WITH TABLE t3
+POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: query: SELECT * FROM t3
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t3
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+PREHOOK: query: SELECT * FROM t4
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t4
+PREHOOK: Input: default@t4@d1=1/d2=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t4
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t4
+POSTHOOK: Input: default@t4@d1=1/d2=1
+#### A masked pattern was here ####
+100	1	1
+PREHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
+PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+POSTHOOK: query: ALTER TABLE t6 EXCHANGE PARTITION (d1 = 1, d2 = 1, d3 = 1) WITH TABLE t5
+POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION
+PREHOOK: query: SELECT * FROM t5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t5
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t5
+#### A masked pattern was here ####
+PREHOOK: query: SELECT * FROM t6
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t6
+PREHOOK: Input: default@t6@d1=1/d2=1/d3=1
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT * FROM t6
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t6
+POSTHOOK: Input: default@t6@d1=1/d2=1/d3=1
+#### A masked pattern was here ####
+100	1	1	1


[04/41] hive git commit: HIVE-11817: Window function max NullPointerException (Jimmy, reviewed by Szehon)

Posted by se...@apache.org.
HIVE-11817: Window function max NullPointerException (Jimmy, reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5548a9c3
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5548a9c3
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5548a9c3

Branch: refs/heads/llap
Commit: 5548a9c376c2c59cbad9788689e58daa1a11f21e
Parents: 07ca812
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Mon Sep 14 12:47:18 2015 -0700
Committer: Jimmy Xiang <jx...@cloudera.com>
Committed: Tue Sep 15 09:14:38 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/udf/generic/GenericUDAFMax.java  | 16 +++++++++-------
 ql/src/test/queries/clientpositive/windowing_udaf.q |  4 ++++
 .../results/clientpositive/windowing_udaf.q.out     | 12 ++++++++++++
 3 files changed, 25 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5548a9c3/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java
index 33600f2..55a6a62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFMax.java
@@ -287,14 +287,16 @@ public class GenericUDAFMax extends AbstractGenericUDAFResolver {
       // For the case: X following and Y following, process first Y-X results and then insert X nulls.
       // For the case X preceding and Y following, process Y results.
       for (int i = Math.max(0, wFrameDef.getStart().getRelativeOffset()); i < wFrameDef.getEnd().getRelativeOffset(); i++) {
-        s.results.add(r[0]);
+        s.results.add(r == null ? null : r[0]);
         s.numRows++;
-        int fIdx = (Integer) r[1];
-        if (!wFrameDef.isStartUnbounded()
-            && s.numRows + i >= fIdx + wFrameDef.getWindowSize()
-            && !s.maxChain.isEmpty()) {
-          s.maxChain.removeFirst();
-          r = !s.maxChain.isEmpty() ? s.maxChain.getFirst() : r;
+        if (r != null) {
+          int fIdx = (Integer) r[1];
+          if (!wFrameDef.isStartUnbounded()
+              && s.numRows + i >= fIdx + wFrameDef.getWindowSize()
+              && !s.maxChain.isEmpty()) {
+            s.maxChain.removeFirst();
+            r = !s.maxChain.isEmpty() ? s.maxChain.getFirst() : r;
+          }
         }
       }
       for (int i = 0; i < wFrameDef.getStart().getRelativeOffset(); i++) {
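
For illustration, a minimal standalone sketch of the null guard above (plain Java; the frame-boundary bookkeeping with fIdx and the window size is omitted, so this is not the Hive aggregator). When the max chain is empty, for example because no rows reached the frame, r is null and the remaining window slots are filled with nulls instead of dereferencing r[0].

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class NullSafeWindowTailSketch {
  // Fill trailing window slots from the head of the max chain, guarding every use of r.
  static void fillTail(Deque<Object[]> maxChain, List<Object> results, int slots) {
    Object[] r = maxChain.peekFirst(); // null when the chain is empty
    for (int i = 0; i < slots; i++) {
      results.add(r == null ? null : r[0]);
      if (r != null && !maxChain.isEmpty()) {
        maxChain.removeFirst();
        r = !maxChain.isEmpty() ? maxChain.getFirst() : r;
      }
    }
  }

  public static void main(String[] args) {
    List<Object> results = new ArrayList<Object>();
    fillTail(new ArrayDeque<Object[]>(), results, 3); // empty chain: no NPE
    System.out.println(results); // [null, null, null]
  }
}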

http://git-wip-us.apache.org/repos/asf/hive/blob/5548a9c3/ql/src/test/queries/clientpositive/windowing_udaf.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/windowing_udaf.q b/ql/src/test/queries/clientpositive/windowing_udaf.q
index 0173ab7..45c5e5b 100644
--- a/ql/src/test/queries/clientpositive/windowing_udaf.q
+++ b/ql/src/test/queries/clientpositive/windowing_udaf.q
@@ -26,3 +26,7 @@ select s, avg(i) over (partition by t, b order by s) from over10k limit 100;
 select max(i) over w from over10k window w as (partition by f) limit 100;
 
 select s, avg(d) over (partition by t order by f) from over10k limit 100;
+
+select key, max(value) over
+  (order by key rows between 10 preceding and 20 following)
+from src1 where length(key) > 10;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/5548a9c3/ql/src/test/results/clientpositive/windowing_udaf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_udaf.q.out b/ql/src/test/results/clientpositive/windowing_udaf.q.out
index 9d50251..298c1dd 100644
--- a/ql/src/test/results/clientpositive/windowing_udaf.q.out
+++ b/ql/src/test/results/clientpositive/windowing_udaf.q.out
@@ -584,3 +584,15 @@ zach zipper	19.822727272727274
 ulysses king	18.273333333333333
 bob king	17.664615384615384
 luke carson	18.02785714285714
+PREHOOK: query: select key, max(value) over
+  (order by key rows between 10 preceding and 20 following)
+from src1 where length(key) > 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src1
+#### A masked pattern was here ####
+POSTHOOK: query: select key, max(value) over
+  (order by key rows between 10 preceding and 20 following)
+from src1 where length(key) > 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src1
+#### A masked pattern was here ####


[11/41] hive git commit: HIVE-11832: HIVE-11802 breaks compilation in JDK 8

Posted by se...@apache.org.
HIVE-11832: HIVE-11802 breaks compilation in JDK 8


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/201b1a00
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/201b1a00
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/201b1a00

Branch: refs/heads/llap
Commit: 201b1a004adbefab1b48098241c0fc43d3dd0dcf
Parents: 5a550cb
Author: Sergio Pena <se...@cloudera.com>
Authored: Wed Sep 16 11:22:27 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Wed Sep 16 11:22:27 2015 -0500

----------------------------------------------------------------------
 service/src/java/org/apache/hive/service/cli/Column.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/201b1a00/service/src/java/org/apache/hive/service/cli/Column.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/Column.java b/service/src/java/org/apache/hive/service/cli/Column.java
index 31091a3..adb269a 100644
--- a/service/src/java/org/apache/hive/service/cli/Column.java
+++ b/service/src/java/org/apache/hive/service/cli/Column.java
@@ -40,7 +40,6 @@ import org.apache.hive.service.cli.thrift.TI16Column;
 import org.apache.hive.service.cli.thrift.TI32Column;
 import org.apache.hive.service.cli.thrift.TI64Column;
 import org.apache.hive.service.cli.thrift.TStringColumn;
-import sun.misc.FloatingDecimal;
 
 /**
  * Column.
@@ -350,7 +349,7 @@ public class Column extends AbstractList {
         break;
       case FLOAT_TYPE:
         nulls.set(size, field == null);
-        doubleVars()[size] = field == null ? 0 : new FloatingDecimal((Float)field).doubleValue();
+        doubleVars()[size] = field == null ? 0 : new Double(field.toString());
         break;
       case DOUBLE_TYPE:
         nulls.set(size, field == null);
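
For illustration, one plausible reason the replacement parses the field's String form rather than widening the float directly (an assumption about intent, not stated in the patch; sun.misc.FloatingDecimal is a JDK-internal class whose API changed in JDK 8, which is what broke compilation): widening a float to double keeps its binary value, while parsing its decimal representation keeps the value as displayed. A minimal sketch:

public class FloatToDoubleSketch {
  public static void main(String[] args) {
    Float field = 0.1f;
    // Direct widening keeps the float's binary value:
    System.out.println((double) field.floatValue());  // 0.10000000149011612
    // Parsing the decimal string keeps the displayed value, as in the patch:
    System.out.println(new Double(field.toString())); // 0.1
  }
}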


[19/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
index 22be1d7..122e87a 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
@@ -89,19 +89,19 @@ STAGE PLANS:
                     predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                        keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                        aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                        keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           sort order: +++++
-                          Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized
@@ -109,12 +109,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-                keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+                  expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
@@ -342,19 +342,19 @@ STAGE PLANS:
                     predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                        keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                        aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                        keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           sort order: +++++
-                          Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized
@@ -362,12 +362,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-                keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+                  expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
index 8013bfe..ec77280 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
@@ -85,19 +85,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cint, cfloat, cdouble, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_samp(_col0), min(_col2), stddev_samp(_col4), var_pop(_col4), var_samp(_col5), stddev_pop(_col5)
-                        keys: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                        aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                        keys: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          key expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                           sort order: +++++++
-                          Map-reduce partition columns: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                           Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized
@@ -105,12 +105,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
-                keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: int), KEY._col2 (type: float), KEY._col3 (type: double), KEY._col4 (type: string), KEY._col5 (type: timestamp), KEY._col6 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                 Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col5)) (type: double), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - UDFToDouble(_col5))) (type: double), _col12 (type: double)
+                  expressions: _col2 (type: float), _col6 (type: boolean), _col3 (type: double), _col4 (type: string), _col0 (type: tinyint), _col1 (type: int), _col5 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col1)) (type: double), _col8 (type: double), (_col3 * 79.553) (type: double), (33.0 % _col2) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col3) (type: double), (- _col0) (type: tinyint), _col11 (type: double), (UDFToFloat(_col1) - _col2) (type: float), (-23 % UDFToInteger(_col0)) (type: int), (- (-26.28 - UDFToDouble(_col1))) (type: double), _col12 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
index a42c30a..3326044 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out
@@ -62,19 +62,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
                     Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2
+                      expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      outputColumnNames: cdouble, cstring1, ctimestamp1
                       Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                        keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                        aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                        keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           sort order: +++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized
@@ -82,12 +82,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-                keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+                keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+                  expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                   Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
index a42c30a..3326044 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out
@@ -62,19 +62,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
                     Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2
+                      expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      outputColumnNames: cdouble, cstring1, ctimestamp1
                       Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                        keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                        aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                        keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           sort order: +++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized
@@ -82,12 +82,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-                keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+                keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+                  expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                   Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
index eb8914b..59caac7 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_pushdown.q.out
@@ -25,10 +25,10 @@ STAGE PLANS:
                     Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint)
-                      outputColumnNames: _col0
+                      outputColumnNames: cbigint
                       Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0)
+                        aggregations: avg(cbigint)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 3d17aba..7b4f846 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -156,10 +156,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cint, cdouble, csmallint, cfloat, ctinyint
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), sum(_col1), stddev_pop(_col0), stddev_samp(_col2), var_samp(_col0), avg(_col3), stddev_samp(_col0), min(_col4), count(_col2)
+                        aggregations: avg(cint), sum(cdouble), stddev_pop(cint), stddev_samp(csmallint), var_samp(cint), avg(cfloat), stddev_samp(cint), min(ctinyint), count(csmallint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
@@ -368,10 +368,10 @@ STAGE PLANS:
                     Statistics: Num rows: 6826 Data size: 209555 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cint, cbigint, csmallint, cdouble, ctinyint
                       Statistics: Num rows: 6826 Data size: 209555 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col0), var_pop(_col1), stddev_pop(_col2), max(_col3), avg(_col4), min(_col0), min(_col3), stddev_samp(_col2), var_samp(_col0)
+                        aggregations: max(cint), var_pop(cbigint), stddev_pop(csmallint), max(cdouble), avg(ctinyint), min(cint), min(cdouble), stddev_samp(csmallint), var_samp(cint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -571,10 +571,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cbigint, ctinyint, csmallint, cint, cdouble
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: var_pop(_col0), count(), max(_col1), stddev_pop(_col2), max(_col3), stddev_samp(_col4), count(_col1), avg(_col1)
+                        aggregations: var_pop(cbigint), count(), max(ctinyint), stddev_pop(csmallint), max(cint), stddev_samp(cdouble), count(ctinyint), avg(ctinyint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -753,10 +753,10 @@ STAGE PLANS:
                     Statistics: Num rows: 8874 Data size: 272428 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: ctinyint, cbigint, cint, cfloat
                       Statistics: Num rows: 8874 Data size: 272428 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), max(_col1), stddev_samp(_col2), var_pop(_col2), var_pop(_col1), max(_col3)
+                        aggregations: avg(ctinyint), max(cbigint), stddev_samp(cint), var_pop(cint), var_pop(cbigint), max(cfloat)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -1882,11 +1882,11 @@ STAGE PLANS:
                     Statistics: Num rows: 2503 Data size: 76841 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: csmallint (type: smallint), cbigint (type: bigint), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: csmallint, cbigint, ctinyint
                       Statistics: Num rows: 2503 Data size: 76841 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_samp(_col0), sum(_col1), var_pop(_col2), count()
-                        keys: _col0 (type: smallint)
+                        aggregations: stddev_samp(csmallint), sum(cbigint), var_pop(ctinyint), count()
+                        keys: csmallint (type: smallint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 2503 Data size: 76841 Basic stats: COMPLETE Column stats: NONE
@@ -2089,11 +2089,11 @@ STAGE PLANS:
                     Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: cdouble, cfloat
                       Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
-                        keys: _col0 (type: double)
+                        aggregations: var_samp(cdouble), count(cfloat), sum(cfloat), var_pop(cdouble), stddev_pop(cdouble), sum(cdouble)
+                        keys: cdouble (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                         Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
@@ -2343,19 +2343,19 @@ STAGE PLANS:
                     predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
+                      outputColumnNames: cstring1, ctimestamp1, cint, csmallint, ctinyint, cfloat, cdouble
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_pop(_col2), avg(_col3), count(), min(_col4), var_samp(_col3), var_pop(_col5), avg(_col2), var_samp(_col5), avg(_col5), min(_col6), var_pop(_col3), stddev_pop(_col4), sum(_col2)
-                        keys: _col0 (type: timestamp), _col1 (type: string)
+                        aggregations: stddev_pop(cint), avg(csmallint), count(), min(ctinyint), var_samp(csmallint), var_pop(cfloat), avg(cint), var_samp(cfloat), avg(cfloat), min(cdouble), var_pop(csmallint), stddev_pop(ctinyint), sum(cint)
+                        keys: cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                         Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: timestamp), _col1 (type: string)
+                          key expressions: _col0 (type: string), _col1 (type: timestamp)
                           sort order: ++
-                          Map-reduce partition columns: _col0 (type: timestamp), _col1 (type: string)
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
                           Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,input:smallint>), _col4 (type: bigint), _col5 (type: tinyint), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,input:float>), _col11 (type: double), _col12 (type: struct<count:bigint,sum:double,variance:double>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: bigint)
             Execution mode: vectorized
@@ -2363,12 +2363,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_pop(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), min(VALUE._col3), var_samp(VALUE._col4), var_pop(VALUE._col5), avg(VALUE._col6), var_samp(VALUE._col7), avg(VALUE._col8), min(VALUE._col9), var_pop(VALUE._col10), stddev_pop(VALUE._col11), sum(VALUE._col12)
-                keys: KEY._col0 (type: timestamp), KEY._col1 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                 Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (-
  _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
+                  expressions: _col1 (type: timestamp), _col0 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (-
  _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38
                   Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
@@ -2676,11 +2676,11 @@ STAGE PLANS:
                     Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      outputColumnNames: cboolean1, cfloat, cbigint, cint, cdouble, ctinyint, csmallint
                       Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col2), var_samp(_col3), avg(_col4), min(_col2), var_pop(_col2), sum(_col3), stddev_samp(_col5), stddev_pop(_col6), avg(_col3)
-                        keys: _col0 (type: boolean)
+                        aggregations: max(cfloat), sum(cbigint), var_samp(cint), avg(cdouble), min(cbigint), var_pop(cbigint), sum(cint), stddev_samp(ctinyint), stddev_pop(csmallint), avg(cint)
+                        keys: cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
@@ -2915,10 +2915,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: i (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: i
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(i)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3093,10 +3093,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctinyint
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(ctinyint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3160,10 +3160,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: cint
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3227,10 +3227,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cfloat (type: float)
-                    outputColumnNames: _col0
+                    outputColumnNames: cfloat
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cfloat)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3294,10 +3294,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cstring1 (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: cstring1
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cstring1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3361,10 +3361,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cboolean1 (type: boolean)
-                    outputColumnNames: _col0
+                    outputColumnNames: cboolean1
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cboolean1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
index 7ba64b7..316ed63 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
@@ -97,19 +97,15 @@ STAGE PLANS:
                             input vertices:
                               1 Map 4
                             Statistics: Num rows: 7433 Data size: 228226 Basic stats: COMPLETE Column stats: NONE
-                            Select Operator
-                              expressions: _col1 (type: double)
+                            Group By Operator
+                              aggregations: sum(_col1)
+                              mode: hash
                               outputColumnNames: _col0
-                              Statistics: Num rows: 7433 Data size: 228226 Basic stats: COMPLETE Column stats: NONE
-                              Group By Operator
-                                aggregations: sum(_col0)
-                                mode: hash
-                                outputColumnNames: _col0
+                              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                              Reduce Output Operator
+                                sort order: 
                                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                                Reduce Output Operator
-                                  sort order: 
-                                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                                  value expressions: _col0 (type: double)
+                                value expressions: _col0 (type: double)
             Local Work:
               Map Reduce Local Work
             Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
index 290db0d..ed1ba4b 100644
--- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
@@ -644,10 +644,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0), max(_col0), count(_col0), count()
+                      aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count()
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
@@ -725,10 +725,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(ctimestamp1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -815,10 +815,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: avg(_col0), variance(_col0), var_pop(_col0), var_samp(_col0), std(_col0), stddev(_col0), stddev_pop(_col0), stddev_samp(_col0)
+                      aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out
index 55c5970..bbd32fe 100644
--- a/ql/src/test/results/clientpositive/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/stats_only_null.q.out
@@ -85,10 +85,10 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: a, b, c, d
               Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                aggregations: count(), count(a), count(b), count(c), count(d)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
@@ -135,10 +135,10 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: a, b, c, d
               Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                aggregations: count(), count(a), count(b), count(c), count(d)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/stats_ppr_all.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_ppr_all.q.out b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
index 5f6f5d4..7627f7a 100644
--- a/ql/src/test/results/clientpositive/stats_ppr_all.q.out
+++ b/ql/src/test/results/clientpositive/stats_ppr_all.q.out
@@ -77,10 +77,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: order_amount (type: float)
-              outputColumnNames: _col0
+              outputColumnNames: order_amount
               Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(order_amount)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -128,10 +128,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
               Select Operator
                 expressions: order_amount (type: float)
-                outputColumnNames: _col0
+                outputColumnNames: order_amount
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: PARTIAL
                 Group By Operator
-                  aggregations: sum(_col0)
+                  aggregations: sum(order_amount)
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
@@ -176,10 +176,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: order_amount (type: float)
-              outputColumnNames: _col0
+              outputColumnNames: order_amount
               Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(order_amount)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
@@ -227,10 +227,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: order_amount (type: float)
-                outputColumnNames: _col0
+                outputColumnNames: order_amount
                 Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
                 Group By Operator
-                  aggregations: sum(_col0)
+                  aggregations: sum(order_amount)
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subq_where_serialization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subq_where_serialization.q.out b/ql/src/test/results/clientpositive/subq_where_serialization.q.out
index c0b2a2d..2a60036 100644
--- a/ql/src/test/results/clientpositive/subq_where_serialization.q.out
+++ b/ql/src/test/results/clientpositive/subq_where_serialization.q.out
@@ -20,20 +20,16 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_exists_having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_exists_having.q.out b/ql/src/test/results/clientpositive/subquery_exists_having.q.out
index 14819c9..13877fc 100644
--- a/ql/src/test/results/clientpositive/subquery_exists_having.q.out
+++ b/ql/src/test/results/clientpositive/subquery_exists_having.q.out
@@ -35,22 +35,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -182,22 +178,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
           TableScan
             alias: b
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_in.q.out b/ql/src/test/results/clientpositive/subquery_in.q.out
index f82c799..f12af57 100644
--- a/ql/src/test/results/clientpositive/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/subquery_in.q.out
@@ -632,20 +632,16 @@ STAGE PLANS:
             Filter Operator
               predicate: ((key > '9') and value is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -886,20 +882,16 @@ STAGE PLANS:
             Filter Operator
               predicate: l_partkey is not null (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: l_partkey (type: int)
+              Group By Operator
+                keys: l_partkey (type: int)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)


[02/41] hive git commit: HIVE-11544: Improve LazySimpleSerDe null data handling for Byte, Short, Integer, Float, Long and Double. (Gopal V, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11544: Improve LazySimpleSerDe null data handling for Byte, Short, Integer, Float, Long and Double. (Gopal V, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/98049182
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/98049182
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/98049182

Branch: refs/heads/llap
Commit: 980491821e6c33d595e8a0e3abcfbc6c207aa436
Parents: 0bc9677
Author: Gopal V <go...@apache.org>
Authored: Mon Sep 14 18:13:42 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Mon Sep 14 18:13:42 2015 -0700

----------------------------------------------------------------------
 .../benchmark/serde/LazySimpleSerDeBench.java   | 453 +++++++++++++++++++
 .../hadoop/hive/serde2/lazy/LazyByte.java       |   4 +
 .../hadoop/hive/serde2/lazy/LazyDouble.java     |   4 +
 .../hadoop/hive/serde2/lazy/LazyFloat.java      |   4 +
 .../hadoop/hive/serde2/lazy/LazyInteger.java    |   4 +
 .../hadoop/hive/serde2/lazy/LazyLong.java       |   4 +
 .../hadoop/hive/serde2/lazy/LazyShort.java      |   4 +
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |  28 ++
 8 files changed, 505 insertions(+)
----------------------------------------------------------------------
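
Context for the diffstat above: the lazy primitives touched here (LazyByte, LazyShort, LazyInteger, LazyLong, LazyFloat, LazyDouble) parse their field bytes on demand, so how cheaply an empty field or a bad/null value is rejected matters for scan-heavy workloads, which is what the new LazySimpleSerDeBench JMH harness in this patch measures. The sketch below only illustrates the general idea of a cheap pre-check before the numeric parse; it is a stand-alone example, not the committed code, and the helper name looksLikeNumber, the pipe-delimited row layout, and the treatment of the default "\N" null marker are assumptions made purely for illustration.

// Illustrative sketch (not the patch itself): reject obviously non-numeric
// field bytes up front so the common null/garbage case does not rely on
// catching NumberFormatException from the full parse.
import java.nio.charset.StandardCharsets;

public class LazyNumberPreCheckSketch {

  // Cheap scan: true only if bytes[start, start+length) could plausibly be a
  // signed decimal integer. Empty fields, the "\N" null marker and stray text
  // all fail fast here.
  static boolean looksLikeNumber(byte[] bytes, int start, int length) {
    if (length == 0) {
      return false;                      // empty field -> treat as null
    }
    int i = start;
    if (bytes[i] == '+' || bytes[i] == '-') {
      if (length == 1) {
        return false;                    // a lone sign is not a number
      }
      i++;
    }
    for (; i < start + length; i++) {
      if (bytes[i] < '0' || bytes[i] > '9') {
        return false;                    // covers "\N", "null", trailing junk
      }
    }
    return true;
  }

  // Parse an int field, returning null instead of throwing on bad data.
  // (Overflow handling is omitted to keep the sketch short.)
  static Integer parseIntField(byte[] bytes, int start, int length) {
    if (!looksLikeNumber(bytes, start, length)) {
      return null;
    }
    return Integer.parseInt(new String(bytes, start, length, StandardCharsets.US_ASCII));
  }

  public static void main(String[] args) {
    // Field offsets and lengths as a LazyStruct-style reader would hand them
    // to a lazy primitive: "123" | "\N" | "" | "-45"
    byte[] row = "123|\\N||-45".getBytes(StandardCharsets.US_ASCII);
    System.out.println(parseIntField(row, 0, 3));   // 123
    System.out.println(parseIntField(row, 4, 2));   // null ("\N" null marker)
    System.out.println(parseIntField(row, 7, 0));   // null (empty field)
    System.out.println(parseIntField(row, 8, 3));   // -45
  }
}

Nothing in the sketch depends on Hive classes, so it compiles and runs on its own; the actual change lives in the Lazy* and LazyUtils classes listed in the diffstat above.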


http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
----------------------------------------------------------------------
diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
new file mode 100644
index 0000000..a1b63d5
--- /dev/null
+++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/serde/LazySimpleSerDeBench.java
@@ -0,0 +1,453 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.benchmark.serde;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef;
+import org.apache.hadoop.hive.serde2.lazy.LazyByte;
+import org.apache.hadoop.hive.serde2.lazy.LazyDouble;
+import org.apache.hadoop.hive.serde2.lazy.LazyFloat;
+import org.apache.hadoop.hive.serde2.lazy.LazyInteger;
+import org.apache.hadoop.hive.serde2.lazy.LazyLong;
+import org.apache.hadoop.hive.serde2.lazy.LazyShort;
+import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+@State(Scope.Benchmark)
+public class LazySimpleSerDeBench {
+  /**
+   * This test measures the performance for LazySimpleSerDe.
+   * <p/>
+   * This test uses JMH framework for benchmarking. You may execute this
+   * benchmark tool using JMH command line in different ways:
+   * <p/>
+   * To run using default settings, use: 
+   * $ java -cp target/benchmarks.jar org.apache.hive.benchmark.serde.LazySimpleSerDeBench
+   * <p/>
+   */
+
+  @BenchmarkMode(Mode.AverageTime)
+  @Fork(1)
+  @State(Scope.Thread)
+  @OutputTimeUnit(TimeUnit.NANOSECONDS)
+  public static abstract class AbstractDeserializer {
+    public static final int DEFAULT_ITER_TIME = 1000000;
+
+    public static final int DEFAULT_DATA_SIZE = 4096;
+
+    public int[] offsets = new int[DEFAULT_DATA_SIZE];
+    public int[] sizes = new int[DEFAULT_DATA_SIZE];
+    protected final ByteArrayRef ref = new ByteArrayRef();
+
+    @Setup
+    public abstract void setup();
+
+    @Benchmark
+    @Warmup(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = 2, time = 2, timeUnit = TimeUnit.MILLISECONDS)
+    public void bench() {
+
+    }
+  }
+
+  public static abstract class RandomDataInitializer extends
+      AbstractDeserializer {
+
+    final int width;
+
+    public RandomDataInitializer(final int width) {
+      this.width = width;
+    }
+
+    @Override
+    public void setup() {
+      int len = 0;
+      Random r = new Random();
+      for (int i = 0; i < sizes.length; i++) {
+        sizes[i] = (int) (r.nextInt(width));
+        offsets[i] = len;
+        len += sizes[i];
+      }
+      byte[] data = new byte[len + 1];
+      r.nextBytes(data);
+      ref.setData(data);
+    }
+  }
+
+  public static abstract class GoodDataInitializer extends AbstractDeserializer {
+
+    public final int max;
+
+    public GoodDataInitializer(final int max) {
+      this.max = max;
+    }
+
+    @Override
+    public void setup() {
+      sizes = new int[1024];
+      offsets = new int[sizes.length];
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      Random r = new Random();
+      int len = 0;
+      for (int i = 0; i < sizes.length / 2; i++) {
+        int p = r.nextInt(max);
+        int n = -1 * (p - 1);
+        byte[] ps = String.format("%d", p).getBytes();
+        byte[] ns = String.format("%d", n).getBytes();
+        sizes[2 * i] = ps.length;
+        sizes[2 * i + 1] = ns.length;
+        offsets[2 * i] = len;
+        offsets[2 * i + 1] = len + ps.length;
+        len += ps.length + ns.length;
+        try {
+          bos.write(ns);
+          bos.write(ps);
+        } catch (IOException e) {
+          e.printStackTrace();
+          throw new RuntimeException(e);
+        }
+      }
+      ref.setData(bos.toByteArray());
+    }
+  }
+
+  public static class RandomLazyByte extends RandomDataInitializer {
+
+    public RandomLazyByte() {
+      super(2);
+    }
+
+    final LazyByte obj = new LazyByte(
+        LazyPrimitiveObjectInspectorFactory.LAZY_BYTE_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyByte extends RandomDataInitializer {
+
+    public WorstLazyByte() {
+      super(8);
+    }
+
+    final LazyByte obj = new LazyByte(
+        LazyPrimitiveObjectInspectorFactory.LAZY_BYTE_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyByte extends GoodDataInitializer {
+
+    final LazyByte obj = new LazyByte(
+        LazyPrimitiveObjectInspectorFactory.LAZY_BYTE_OBJECT_INSPECTOR);
+
+    public GoodLazyByte() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class RandomLazyShort extends RandomDataInitializer {
+
+    public RandomLazyShort() {
+      super(2);
+    }
+
+    final LazyShort obj = new LazyShort(
+        LazyPrimitiveObjectInspectorFactory.LAZY_SHORT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyShort extends RandomDataInitializer {
+
+    public WorstLazyShort() {
+      super(8);
+    }
+
+    final LazyShort obj = new LazyShort(
+        LazyPrimitiveObjectInspectorFactory.LAZY_SHORT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyShort extends GoodDataInitializer {
+
+    final LazyShort obj = new LazyShort(
+        LazyPrimitiveObjectInspectorFactory.LAZY_SHORT_OBJECT_INSPECTOR);
+
+    public GoodLazyShort() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class RandomLazyInteger extends RandomDataInitializer {
+
+    public RandomLazyInteger() {
+      super(2);
+    }
+
+    final LazyInteger obj = new LazyInteger(
+        LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyInteger extends RandomDataInitializer {
+
+    public WorstLazyInteger() {
+      super(8);
+    }
+
+    final LazyInteger obj = new LazyInteger(
+        LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyInteger extends GoodDataInitializer {
+
+    final LazyInteger obj = new LazyInteger(
+        LazyPrimitiveObjectInspectorFactory.LAZY_INT_OBJECT_INSPECTOR);
+
+    public GoodLazyInteger() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class RandomLazyFloat extends RandomDataInitializer {
+
+    public RandomLazyFloat() {
+      super(2);
+    }
+
+    final LazyFloat obj = new LazyFloat(
+        LazyPrimitiveObjectInspectorFactory.LAZY_FLOAT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyFloat extends RandomDataInitializer {
+
+    public WorstLazyFloat() {
+      super(8);
+    }
+
+    final LazyFloat obj = new LazyFloat(
+        LazyPrimitiveObjectInspectorFactory.LAZY_FLOAT_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyFloat extends GoodDataInitializer {
+
+    final LazyFloat obj = new LazyFloat(
+        LazyPrimitiveObjectInspectorFactory.LAZY_FLOAT_OBJECT_INSPECTOR);
+
+    public GoodLazyFloat() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class RandomLazyLong extends RandomDataInitializer {
+
+    public RandomLazyLong() {
+      super(2);
+    }
+
+    final LazyLong obj = new LazyLong(
+        LazyPrimitiveObjectInspectorFactory.LAZY_LONG_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyLong extends RandomDataInitializer {
+
+    public WorstLazyLong() {
+      super(8);
+    }
+
+    final LazyLong obj = new LazyLong(
+        LazyPrimitiveObjectInspectorFactory.LAZY_LONG_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyLong extends GoodDataInitializer {
+
+    final LazyLong obj = new LazyLong(
+        LazyPrimitiveObjectInspectorFactory.LAZY_LONG_OBJECT_INSPECTOR);
+
+    public GoodLazyLong() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class RandomLazyDouble extends RandomDataInitializer {
+
+    public RandomLazyDouble() {
+      super(2);
+    }
+
+    final LazyDouble obj = new LazyDouble(
+        LazyPrimitiveObjectInspectorFactory.LAZY_DOUBLE_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class WorstLazyDouble extends RandomDataInitializer {
+
+    public WorstLazyDouble() {
+      super(8);
+    }
+
+    final LazyDouble obj = new LazyDouble(
+        LazyPrimitiveObjectInspectorFactory.LAZY_DOUBLE_OBJECT_INSPECTOR);
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static class GoodLazyDouble extends GoodDataInitializer {
+
+    final LazyDouble obj = new LazyDouble(
+        LazyPrimitiveObjectInspectorFactory.LAZY_DOUBLE_OBJECT_INSPECTOR);
+
+    public GoodLazyDouble() {
+      super(Integer.MAX_VALUE);
+    }
+
+    @Override
+    public void bench() {
+      for (int i = 0; i < DEFAULT_ITER_TIME; i++) {
+        obj.init(ref, offsets[i % sizes.length], sizes[i % sizes.length]);
+      }
+    }
+  }
+
+  public static void main(String[] args) throws RunnerException {
+    Options opt = new OptionsBuilder().include(
+        ".*" + LazySimpleSerDeBench.class.getSimpleName() + ".*").build();
+    new Runner(opt).run();
+  }
+}
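
As the class javadoc above notes, the suite is meant to be launched through the JMH command line from the benchmarks jar. A small, hypothetical launcher (not part of the patch) showing the same OptionsBuilder API used in main() above, narrowed to just the Random* cases with explicit iteration counts; the include pattern and the counts here are illustrative assumptions:

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class RandomOnlyLauncher {
  public static void main(String[] args) throws RunnerException {
    Options opt = new OptionsBuilder()
        .include(".*LazySimpleSerDeBench.*RandomLazy.*") // run only the Random* benchmarks
        .warmupIterations(2)                             // override the annotation defaults
        .measurementIterations(5)
        .forks(1)
        .build();
    new Runner(opt).run();
  }
}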

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyByte.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyByte.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyByte.java
index a3b8f76..1f9cead 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyByte.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyByte.java
@@ -48,6 +48,10 @@ public class LazyByte extends
 
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       data.set(parseByte(bytes.getData(), start, length, 10));
       isNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyDouble.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyDouble.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyDouble.java
index 05ca4e9..35c2141 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyDouble.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyDouble.java
@@ -46,6 +46,10 @@ public class LazyDouble extends
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
     String byteData = null;
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       byteData = Text.decode(bytes.getData(), start, length);
       data.set(Double.parseDouble(byteData));

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFloat.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFloat.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFloat.java
index 37676d1..6e132c7 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFloat.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyFloat.java
@@ -46,6 +46,10 @@ public class LazyFloat extends
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
     String byteData = null;
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       byteData = Text.decode(bytes.getData(), start, length);
       data.set(Float.parseFloat(byteData));

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java
index ad82ebf..22742aa 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyInteger.java
@@ -51,6 +51,10 @@ public class LazyInteger extends
 
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       data.set(parseInt(bytes.getData(), start, length, 10));
       isNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
index a9779a0..c0d52b9 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyLong.java
@@ -51,6 +51,10 @@ public class LazyLong extends
 
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       data.set(parseLong(bytes.getData(), start, length, 10));
       isNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyShort.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyShort.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyShort.java
index f04e131..b8b9488 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyShort.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyShort.java
@@ -48,6 +48,10 @@ public class LazyShort extends
 
   @Override
   public void init(ByteArrayRef bytes, int start, int length) {
+    if (!LazyUtils.isNumberMaybe(bytes.getData(), start, length)) {
+      isNull = true;
+      return;
+    }
     try {
       data.set(parseShort(bytes.getData(), start, length));
       isNull = false;

http://git-wip-us.apache.org/repos/asf/hive/blob/98049182/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
index 5c58f6b..a5e4be4 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/LazyUtils.java
@@ -81,6 +81,34 @@ public final class LazyUtils {
   }
 
   /**
+   * returns false, when the bytes definitely cannot be parsed into a base-10
+   * Number (Long or a Double)
+   * 
+   * If it returns true, the bytes might still be invalid, but not obviously.
+   */
+
+  public static boolean isNumberMaybe(byte[] buf, int offset, int len) {
+    switch (len) {
+    case 0:
+      return false;
+    case 1:
+      // space usually
+      return Character.isDigit(buf[offset]);
+    case 2:
+      // \N or -1 (allow latter)
+      return Character.isDigit(buf[offset + 1])
+          || Character.isDigit(buf[offset + 0]);
+    case 4:
+      // null or NULL
+      if (buf[offset] == 'N' || buf[offset] == 'n') {
+        return false;
+      }
+    }
+    // maybe valid - too expensive to check without a parse
+    return true;
+  }
+
+  /**
    * Returns -1 if the first byte sequence is lexicographically less than the
    * second; returns +1 if the second byte sequence is lexicographically less
    * than the first; otherwise return 0.
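
For readers scanning the patch, the idea is that isNumberMaybe() is a cheap pre-check run before the expensive Text.decode()/parse step: byte sequences that obviously cannot be a base-10 number (an empty field, the "\N" null marker, the literals "NULL"/"null") are rejected up front, so the Lazy* init() methods can set isNull without ever throwing and catching NumberFormatException. A minimal, self-contained sketch of that guard-then-parse pattern follows; FastLazyLong and its fields are simplified stand-ins for illustration, not the actual Hive classes.

public class FastLazyLong {
  boolean isNull;
  long value;

  // Same shape as LazyUtils.isNumberMaybe above: false means "definitely not a number".
  static boolean isNumberMaybe(byte[] buf, int offset, int len) {
    switch (len) {
    case 0:
      return false;                              // empty field
    case 1:
      return Character.isDigit(buf[offset]);     // a single byte must be a digit
    case 2:
      // rejects "\N" while still allowing two-character values such as "-1"
      return Character.isDigit(buf[offset]) || Character.isDigit(buf[offset + 1]);
    case 4:
      if (buf[offset] == 'N' || buf[offset] == 'n') {
        return false;                            // "NULL" / "null"
      }
    }
    return true;                                 // might be valid; let the parser decide
  }

  void init(byte[] bytes, int start, int length) {
    if (!isNumberMaybe(bytes, start, length)) {
      isNull = true;                             // cheap rejection, no exception raised
      return;
    }
    try {
      value = Long.parseLong(new String(bytes, start, length));
      isNull = false;
    } catch (NumberFormatException e) {
      isNull = true;                             // still needed for non-obvious garbage
    }
  }

  public static void main(String[] args) {
    FastLazyLong f = new FastLazyLong();
    byte[] nullMarker = "\\N".getBytes();        // Hive's default null sequence
    f.init(nullMarker, 0, nullMarker.length);
    System.out.println("isNull=" + f.isNull);    // prints isNull=true, no parse attempted
  }
}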


[37/41] hive git commit: HIVE-11695 : If a user has no permission to create a LOCAL DIRECTORY, the HQL does not throw any exception and fails silently. (WangMeng via Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11695 : If a user has no permission to create a LOCAL DIRECTORY, the HQL does not throw any exception and fails silently. (WangMeng via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b934a804
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b934a804
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b934a804

Branch: refs/heads/llap
Commit: b934a804a75b7e01cf5ff043db4cc54c82d91641
Parents: c7de9b9
Author: WangMeng <me...@qiyi.com>
Authored: Wed Sep 16 21:00:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Sep 18 10:10:47 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b934a804/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 6a19cc3..7e257e5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -132,7 +132,10 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
           try {
             // create the destination if it does not exist
             if (!dstFs.exists(targetPath)) {
-              FileUtils.mkdir(dstFs, targetPath, false, conf);
+              if (!FileUtils.mkdir(dstFs, targetPath, false, conf)) {
+                throw new HiveException(
+                    "Failed to create local target directory for copy:" + targetPath);
+              }
             }
           } catch (IOException e) {
             throw new HiveException("Unable to create target directory for copy" + targetPath, e);


[20/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/ql_rewrite_gbtoidx_cbo_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ql_rewrite_gbtoidx_cbo_1.q.out b/ql/src/test/results/clientpositive/spark/ql_rewrite_gbtoidx_cbo_1.q.out
index 02e24c3..878d026 100644
--- a/ql/src/test/results/clientpositive/spark/ql_rewrite_gbtoidx_cbo_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/ql_rewrite_gbtoidx_cbo_1.q.out
@@ -97,11 +97,11 @@ STAGE PLANS:
                   Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: l_shipdate (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: l_shipdate
                     Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
-                      keys: _col0 (type: string)
+                      aggregations: count(l_shipdate)
+                      keys: l_shipdate (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
@@ -264,15 +264,15 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: $hdt$_0:default__lineitem_ix_lineitem_ix_lshipdate_idx__
+                  alias: default__lineitem_ix_lineitem_ix_lshipdate_idx__
                   Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
-                    outputColumnNames: _col0, _count_of_l_shipdate
+                    outputColumnNames: l_shipdate, _count_of_l_shipdate
                     Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_count_of_l_shipdate)
-                      keys: _col0 (type: string)
+                      keys: l_shipdate (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
@@ -912,15 +912,15 @@ STAGE PLANS:
         Map 2 
             Map Operator Tree:
                 TableScan
-                  alias: null-subquery1:$hdt$_0-subquery1:$hdt$_0:default__lineitem_ix_lineitem_ix_lshipdate_idx__
+                  alias: null-subquery1:$hdt$_0-subquery1:default__lineitem_ix_lineitem_ix_lshipdate_idx__
                   Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
-                    outputColumnNames: _col0, _count_of_l_shipdate
+                    outputColumnNames: l_shipdate, _count_of_l_shipdate
                     Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_count_of_l_shipdate)
-                      keys: _col0 (type: string)
+                      keys: l_shipdate (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
@@ -1004,11 +1004,11 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 1 (type: int)
-                      outputColumnNames: _col0
+                      outputColumnNames: key
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        aggregations: count(_col0)
-                        keys: _col0 (type: int)
+                        aggregations: count(key)
+                        keys: key (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1058,15 +1058,15 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: $hdt$_0:default__tbl_tbl_key_idx__
+                  alias: default__tbl_tbl_key_idx__
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int), _count_of_key (type: bigint)
-                    outputColumnNames: _col0, _count_of_key
+                    outputColumnNames: key, _count_of_key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: sum(_count_of_key)
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1168,11 +1168,11 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: $hdt$_0:default__tbl_tbl_key_idx__
+                  alias: default__tbl_tbl_key_idx__
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int), _count_of_key (type: bigint)
-                    outputColumnNames: _col0, _count_of_key
+                    outputColumnNames: key, _count_of_key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: sum(_count_of_key)
@@ -1226,10 +1226,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1280,11 +1280,11 @@ STAGE PLANS:
                   alias: tbl
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
-                    expressions: value (type: int), key (type: int)
-                    outputColumnNames: _col0, _col1
+                    expressions: key (type: int), value (type: int)
+                    outputColumnNames: key, value
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int), _col1 (type: int)
+                      keys: key (type: int), value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1301,7 +1301,7 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Select Operator
-                  expressions: _col1 (type: int)
+                  expressions: _col0 (type: int)
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   File Output Operator
@@ -1343,10 +1343,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 3 (type: int)
-                      outputColumnNames: _col0
+                      outputColumnNames: key
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int)
+                        keys: key (type: int)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1401,10 +1401,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int)
-                      outputColumnNames: _col0
+                      outputColumnNames: key
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int)
+                        keys: key (type: int)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1514,11 +1514,11 @@ STAGE PLANS:
                   alias: tbl
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
-                    expressions: value (type: int), key (type: int)
-                    outputColumnNames: _col0, _col1
+                    expressions: key (type: int), value (type: int)
+                    outputColumnNames: key, value
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int), _col1 (type: int)
+                      keys: key (type: int), value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1534,17 +1534,13 @@ STAGE PLANS:
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: int), _col0 (type: int)
-                  outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1577,10 +1573,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 1 (type: int)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: key, value
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), _col1 (type: int)
+                        keys: key (type: int), value (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1632,10 +1628,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1687,10 +1683,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1742,10 +1738,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1797,10 +1793,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: key (type: int), value (type: int)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int), _col1 (type: int)
+                      keys: key (type: int), value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1855,10 +1851,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 2 (type: int)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: key, value
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), _col1 (type: int)
+                        keys: key (type: int), value (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1913,10 +1909,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: 3 (type: int), 2 (type: int)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: key, value
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), _col1 (type: int)
+                        keys: key (type: int), value (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1969,20 +1965,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (value = key) (type: boolean)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int), value (type: int)
+                    Group By Operator
+                      keys: key (type: int), value (type: int)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int), _col1 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: int)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int), _col1 (type: int)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -2142,10 +2134,10 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Select Operator
                       expressions: key (type: int), 2 (type: int)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: key, value
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                       Group By Operator
-                        keys: _col0 (type: int), _col1 (type: int)
+                        keys: key (type: int), value (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -2283,22 +2275,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: int)
-                      outputColumnNames: _col0
+                    Group By Operator
+                      aggregations: count(key)
+                      keys: key (type: int)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col0)
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -2381,18 +2369,18 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: $hdt$_0:default__tblpart_tbl_part_index__
+                  alias: default__tblpart_tbl_part_index__
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), _count_of_key (type: bigint)
-                      outputColumnNames: _col0, _count_of_key
+                      outputColumnNames: key, _count_of_key
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: sum(_count_of_key)
-                        keys: _col0 (type: int)
+                        keys: key (type: int)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -2496,11 +2484,11 @@ STAGE PLANS:
                   Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
-                      keys: _col0 (type: int)
+                      aggregations: count(key)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -2576,15 +2564,15 @@ STAGE PLANS:
         Map 1 
             Map Operator Tree:
                 TableScan
-                  alias: $hdt$_0:default__tbl_tbl_key_idx__
+                  alias: default__tbl_tbl_key_idx__
                   Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: int), _count_of_key (type: bigint)
-                    outputColumnNames: _col0, _count_of_key
+                    outputColumnNames: key, _count_of_key
                     Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: sum(_count_of_key)
-                      keys: _col0 (type: int)
+                      keys: key (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
index cb0920e..c88e53b 100644
--- a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
@@ -90,10 +90,10 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                      aggregations: count(), count(a), count(b), count(c), count(d)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
@@ -146,10 +146,10 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                      aggregations: count(), count(a), count(b), count(c), count(d)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/subquery_in.q.out b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
index bfcdaa8..b2a1681 100644
--- a/ql/src/test/results/clientpositive/spark/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/subquery_in.q.out
@@ -646,20 +646,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: ((key > '9') and value is not null) (type: boolean)
                     Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -829,20 +825,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union11.q.out b/ql/src/test/results/clientpositive/spark/union11.q.out
index a6b9367..ab7222e 100644
--- a/ql/src/test/results/clientpositive/spark/union11.q.out
+++ b/ql/src/test/results/clientpositive/spark/union11.q.out
@@ -36,17 +36,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -54,22 +56,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -78,10 +80,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -92,22 +94,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst2' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 7 
             Reduce Operator Tree:
@@ -115,22 +117,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst3' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
 
   Stage: Stage-0
@@ -155,6 +157,6 @@ POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'tst1' as key, count
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-tst2	1
 tst3	1
 tst1	1
+tst2	1

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union14.q.out b/ql/src/test/results/clientpositive/spark/union14.q.out
index 0c9542b..f276d7f 100644
--- a/ql/src/test/results/clientpositive/spark/union14.q.out
+++ b/ql/src/test/results/clientpositive/spark/union14.q.out
@@ -42,28 +42,30 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -72,10 +74,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -86,22 +88,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union15.q.out b/ql/src/test/results/clientpositive/spark/union15.q.out
index cb8bc75..fc1eb83 100644
--- a/ql/src/test/results/clientpositive/spark/union15.q.out
+++ b/ql/src/test/results/clientpositive/spark/union15.q.out
@@ -34,17 +34,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Map 4 
             Map Operator Tree:
@@ -60,12 +62,12 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -73,22 +75,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -97,10 +99,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union28.q.out b/ql/src/test/results/clientpositive/spark/union28.q.out
index 59657cc..7ee06fe 100644
--- a/ql/src/test/results/clientpositive/spark/union28.q.out
+++ b/ql/src/test/results/clientpositive/spark/union28.q.out
@@ -73,10 +73,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union30.q.out b/ql/src/test/results/clientpositive/spark/union30.q.out
index fd907f9..12eda1d 100644
--- a/ql/src/test/results/clientpositive/spark/union30.q.out
+++ b/ql/src/test/results/clientpositive/spark/union30.q.out
@@ -87,10 +87,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union33.q.out b/ql/src/test/results/clientpositive/spark/union33.q.out
index 271c377..25e545c 100644
--- a/ql/src/test/results/clientpositive/spark/union33.q.out
+++ b/ql/src/test/results/clientpositive/spark/union33.q.out
@@ -72,11 +72,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -205,11 +205,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union5.q.out b/ql/src/test/results/clientpositive/spark/union5.q.out
index 06a5d90..af0528a 100644
--- a/ql/src/test/results/clientpositive/spark/union5.q.out
+++ b/ql/src/test/results/clientpositive/spark/union5.q.out
@@ -31,17 +31,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -49,22 +51,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -73,10 +75,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -87,22 +89,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst2' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
 
   Stage: Stage-0
@@ -123,5 +125,5 @@ POSTHOOK: query: select unionsrc.key, count(1) FROM (select 'tst1' as key, count
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-tst2	1
 tst1	1
+tst2	1

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union7.q.out b/ql/src/test/results/clientpositive/spark/union7.q.out
index 4a81283..181d04c 100644
--- a/ql/src/test/results/clientpositive/spark/union7.q.out
+++ b/ql/src/test/results/clientpositive/spark/union7.q.out
@@ -30,17 +30,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Map 4 
             Map Operator Tree:
@@ -56,12 +58,12 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -69,22 +71,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 3 
             Reduce Operator Tree:
@@ -93,10 +95,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
index 246594f..a4af758 100644
--- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
@@ -81,10 +81,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
index ee5462d..c8df568 100644
--- a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
@@ -1258,10 +1258,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ws_order_number (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: ws_order_number
                     Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: ws_order_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index 7e161d1..cb2d56b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -54,11 +54,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: cint, cdecimal1, cdecimal2
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), count()
-                      keys: _col0 (type: int)
+                      aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
+                      keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                       Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
@@ -159,11 +159,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: cint, cdecimal1, cdecimal2
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), avg(_col1), stddev_pop(_col1), stddev_samp(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), avg(_col2), stddev_pop(_col2), stddev_samp(_col2), count()
-                      keys: _col0 (type: int)
+                      aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
+                      keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
                       Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
index 322270f..52c00f9 100644
--- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
@@ -128,34 +128,38 @@ STAGE PLANS:
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: s (type: string), t (type: tinyint)
-                    outputColumnNames: _col0, _col1
+                    expressions: t (type: tinyint), s (type: string)
+                    outputColumnNames: t, s
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: tinyint)
+                      keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: tinyint)
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: string), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
index 3d6a236..2255f72 100644
--- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
@@ -128,19 +128,19 @@ STAGE PLANS:
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: s (type: string), t (type: tinyint), b (type: bigint)
-                    outputColumnNames: _col0, _col1, _col2
+                    expressions: t (type: tinyint), s (type: string), b (type: bigint)
+                    outputColumnNames: t, s, b
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col2)
-                      keys: _col0 (type: string), _col1 (type: tinyint)
+                      aggregations: max(b)
+                      keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: tinyint)
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized
@@ -148,17 +148,21 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index be39d0d..bbc66fc 100644
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@ -65,20 +65,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Local Work:
               Map Reduce Local Work
@@ -270,20 +266,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Local Work:
               Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
index 6bbb6b9..3363c8b 100644
--- a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
@@ -126,11 +126,11 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: bo (type: boolean), b (type: bigint)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: bo, b
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: boolean)
+                      aggregations: max(b)
+                      keys: bo (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
index 3ad059c..ba33bfb 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
@@ -133,10 +133,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctinyint
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(ctinyint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -439,10 +439,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cbigint (type: bigint)
-                    outputColumnNames: _col0
+                    outputColumnNames: cbigint
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(cbigint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -745,10 +745,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cfloat (type: float)
-                    outputColumnNames: _col0
+                    outputColumnNames: cfloat
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(cfloat)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -998,10 +998,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: cbigint, cfloat, ctinyint
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), stddev_pop(_col0), var_samp(_col0), count(), sum(_col1), min(_col2)
+                        aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE


[25/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)
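The golden-file diffs above reflect this rule: a Project sitting directly under an Aggregate is merged away, so the Group By Operators in the updated plans key on the underlying columns (for example "key (type: string)") instead of projected aliases such as "_col0". As a rough sketch only, using upstream Calcite's stock AggregateProjectMergeRule rather than the HiveAggregateProjectMergeRule added in this commit (which, per the file list below, is presumably wired in from CalcitePlanner), such a rule can be exercised in a standalone HepPlanner pass roughly like this; the class and method names are illustrative:

    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.rules.AggregateProjectMergeRule;

    public class AggregateProjectMergeSketch {

      // Runs a single HepPlanner pass that fires Calcite's stock
      // AggregateProjectMergeRule on the given plan and returns the
      // rewritten plan. The caller supplies the RelNode root, e.g. a
      // LogicalAggregate over a LogicalProject over a table scan.
      public static RelNode mergeAggregateProject(RelNode root) {
        HepProgramBuilder program = new HepProgramBuilder();
        program.addRuleInstance(AggregateProjectMergeRule.INSTANCE);
        HepPlanner planner = new HepPlanner(program.build());
        planner.setRoot(root);
        return planner.findBestExp();
      }
    }

After such a pass, an Aggregate that grouped on a Project alias references the source column directly, which matches the _col0-to-column-name substitutions visible throughout the updated .q.out plans.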


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1cce5f00
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1cce5f00
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1cce5f00

Branch: refs/heads/llap
Commit: 1cce5f006c595e67a4169851ceb352646759bc27
Parents: 201b1a0
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Sep 16 09:41:25 2015 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Sep 16 09:41:25 2015 -0700

----------------------------------------------------------------------
 .../rules/HiveAggregateProjectMergeRule.java    |  151 ++
 .../calcite/rules/HiveRelFieldTrimmer.java      |  145 +-
 .../translator/PlanModifierForASTConv.java      |    4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    2 +
 .../alter_partition_coltype.q.out               |    8 +-
 .../clientpositive/annotate_stats_groupby.q.out |  106 +-
 .../annotate_stats_groupby2.q.out               |   28 +-
 .../results/clientpositive/auto_join18.q.out    |   12 +-
 .../auto_join18_multi_distinct.q.out            |   12 +-
 .../results/clientpositive/auto_join27.q.out    |   18 +-
 .../results/clientpositive/auto_join32.q.out    |    4 +-
 .../clientpositive/binarysortable_1.q.out       |  Bin 4329 -> 4325 bytes
 .../clientpositive/correlationoptimizer2.q.out  |  220 +-
 .../clientpositive/correlationoptimizer6.q.out  |  232 +-
 ql/src/test/results/clientpositive/count.q.out  |   14 +-
 .../results/clientpositive/ctas_colname.q.out   |   52 +-
 .../test/results/clientpositive/database.q.out  |    2 +-
 .../clientpositive/decimal_precision.q.out      |    4 +-
 .../results/clientpositive/decimal_udf.q.out    |   30 +-
 .../results/clientpositive/distinct_stats.q.out |   14 +-
 .../dynpart_sort_opt_vectorization.q.out        |  105 +-
 .../dynpart_sort_optimization.q.out             |  105 +-
 ...ryption_select_read_only_encrypted_tbl.q.out |    4 +-
 .../clientpositive/explain_logical.q.out        |   78 +-
 .../clientpositive/fetch_aggregation.q.out      |    4 +-
 .../test/results/clientpositive/gby_star.q.out  |   54 +-
 .../test/results/clientpositive/groupby12.q.out |    6 +-
 .../results/clientpositive/groupby5_map.q.out   |    4 +-
 .../clientpositive/groupby5_map_skew.q.out      |    4 +-
 .../results/clientpositive/groupby_cube1.q.out  |   12 +-
 .../groupby_distinct_samekey.q.out              |    6 +-
 .../clientpositive/groupby_grouping_sets2.q.out |   10 +-
 .../clientpositive/groupby_grouping_sets3.q.out |   12 +-
 .../clientpositive/groupby_grouping_sets5.q.out |    8 +-
 .../clientpositive/groupby_grouping_sets6.q.out |    8 +-
 .../clientpositive/groupby_position.q.out       |   36 +-
 .../clientpositive/groupby_resolution.q.out     |   60 +-
 .../clientpositive/groupby_rollup1.q.out        |   12 +-
 .../clientpositive/groupby_sort_10.q.out        |    8 +-
 .../clientpositive/groupby_sort_11.q.out        |   10 +-
 .../results/clientpositive/groupby_sort_8.q.out |   12 +-
 ql/src/test/results/clientpositive/having.q.out |   62 +-
 .../test/results/clientpositive/having2.q.out   |   12 +-
 .../clientpositive/index_auto_mult_tables.q.out |   12 +-
 .../clientpositive/index_auto_self_join.q.out   |   12 +-
 .../clientpositive/index_auto_update.q.out      |    6 +-
 .../index_bitmap_auto_partitioned.q.out         |    6 +-
 .../index_bitmap_compression.q.out              |    6 +-
 .../infer_bucket_sort_dyn_part.q.out            |    4 +-
 .../infer_bucket_sort_map_operators.q.out       |    4 +-
 ql/src/test/results/clientpositive/join18.q.out |   12 +-
 .../clientpositive/join18_multi_distinct.q.out  |   12 +-
 ql/src/test/results/clientpositive/join31.q.out |   36 +-
 .../limit_partition_metadataonly.q.out          |    4 +-
 .../results/clientpositive/limit_pushdown.q.out |   36 +-
 .../test/results/clientpositive/lineage2.q.out  |    2 +-
 .../test/results/clientpositive/lineage3.q.out  |    4 +-
 .../list_bucket_query_multiskew_3.q.out         |    2 +-
 .../clientpositive/mapjoin_mapjoin.q.out        |   32 +-
 .../clientpositive/metadata_only_queries.q.out  |    4 +-
 .../results/clientpositive/metadataonly1.q.out  |  112 +-
 .../results/clientpositive/multiMapJoin2.q.out  |  226 +-
 .../nonblock_op_deduplicate.q.out               |    8 +-
 .../results/clientpositive/nonmr_fetch.q.out    |   14 +-
 .../clientpositive/partition_multilevels.q.out  |    8 +-
 .../test/results/clientpositive/ppd_gby.q.out   |   12 +-
 .../test/results/clientpositive/ppd_gby2.q.out  |   60 +-
 .../clientpositive/ppd_join_filter.q.out        |   98 +-
 .../ql_rewrite_gbtoidx_cbo_1.q.out              |  168 +-
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |   94 +-
 .../reduce_deduplicate_extended.q.out           |   32 +-
 .../clientpositive/selectDistinctStar.q.out     |   44 +-
 .../clientpositive/spark/auto_join18.q.out      |   10 +-
 .../spark/auto_join18_multi_distinct.q.out      |   12 +-
 .../clientpositive/spark/auto_join27.q.out      |   18 +-
 .../clientpositive/spark/auto_join32.q.out      |   53 +-
 .../results/clientpositive/spark/count.q.out    |   14 +-
 .../clientpositive/spark/groupby5_map.q.out     |    4 +-
 .../spark/groupby5_map_skew.q.out               |    4 +-
 .../clientpositive/spark/groupby_cube1.q.out    |   12 +-
 .../clientpositive/spark/groupby_position.q.out |   18 +-
 .../spark/groupby_resolution.q.out              |   60 +-
 .../clientpositive/spark/groupby_rollup1.q.out  |   12 +-
 .../results/clientpositive/spark/having.q.out   |   62 +-
 .../spark/infer_bucket_sort_map_operators.q.out |    4 +-
 .../results/clientpositive/spark/join18.q.out   |   10 +-
 .../spark/join18_multi_distinct.q.out           |   12 +-
 .../results/clientpositive/spark/join31.q.out   |   36 +-
 .../spark/limit_partition_metadataonly.q.out    |    4 +-
 .../clientpositive/spark/limit_pushdown.q.out   |   34 +-
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |   24 +-
 .../spark/metadata_only_queries.q.out           |    4 +-
 .../clientpositive/spark/ppd_join_filter.q.out  |   90 +-
 .../spark/ql_rewrite_gbtoidx_cbo_1.q.out        |  168 +-
 .../clientpositive/spark/stats_only_null.q.out  |    8 +-
 .../clientpositive/spark/subquery_in.q.out      |   36 +-
 .../results/clientpositive/spark/union11.q.out  |   42 +-
 .../results/clientpositive/spark/union14.q.out  |   28 +-
 .../results/clientpositive/spark/union15.q.out  |   28 +-
 .../results/clientpositive/spark/union28.q.out  |    4 +-
 .../results/clientpositive/spark/union30.q.out  |    4 +-
 .../results/clientpositive/spark/union33.q.out  |    8 +-
 .../results/clientpositive/spark/union5.q.out   |   34 +-
 .../results/clientpositive/spark/union7.q.out   |   28 +-
 .../clientpositive/spark/union_remove_21.q.out  |    4 +-
 .../spark/vector_count_distinct.q.out           |    4 +-
 .../spark/vector_decimal_aggregate.q.out        |   12 +-
 .../spark/vector_distinct_2.q.out               |   28 +-
 .../clientpositive/spark/vector_groupby_3.q.out |   30 +-
 .../spark/vector_mapjoin_reduce.q.out           |   36 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    6 +-
 .../clientpositive/spark/vectorization_0.q.out  |   16 +-
 .../clientpositive/spark/vectorization_13.q.out |   32 +-
 .../clientpositive/spark/vectorization_15.q.out |   16 +-
 .../clientpositive/spark/vectorization_16.q.out |   16 +-
 .../clientpositive/spark/vectorization_9.q.out  |   16 +-
 .../spark/vectorization_pushdown.q.out          |    4 +-
 .../spark/vectorization_short_regress.q.out     |   74 +-
 .../spark/vectorized_nested_mapjoin.q.out       |   18 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   12 +-
 .../clientpositive/stats_only_null.q.out        |    8 +-
 .../results/clientpositive/stats_ppr_all.q.out  |   16 +-
 .../subq_where_serialization.q.out              |   18 +-
 .../clientpositive/subquery_exists_having.q.out |   48 +-
 .../results/clientpositive/subquery_in.q.out    |   36 +-
 .../clientpositive/subquery_in_having.q.out     |  260 +-
 .../clientpositive/subquery_notexists.q.out     |   18 +-
 .../subquery_notexists_having.q.out             |   26 +-
 .../results/clientpositive/subquery_notin.q.out |   24 +-
 .../subquery_notin_having.q.java1.7.out         |   50 +-
 .../subquery_unqualcolumnrefs.q.out             |   74 +-
 .../results/clientpositive/subquery_views.q.out |    8 +-
 .../test/results/clientpositive/tez/count.q.out |   14 +-
 .../tez/dynamic_partition_pruning.q.out         |   88 +-
 .../tez/dynpart_sort_opt_vectorization.q.out    |   90 +-
 .../tez/dynpart_sort_optimization.q.out         |   89 +-
 .../clientpositive/tez/explainuser_1.q.out      | 2319 +++++++++---------
 .../clientpositive/tez/explainuser_2.q.out      |  782 +++---
 .../results/clientpositive/tez/having.q.out     |   62 +-
 .../clientpositive/tez/limit_pushdown.q.out     |   34 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    |   24 +-
 .../tez/metadata_only_queries.q.out             |    4 +-
 .../clientpositive/tez/metadataonly1.q.out      |   44 +-
 .../test/results/clientpositive/tez/mrr.q.out   |   94 +-
 .../clientpositive/tez/selectDistinctStar.q.out |   44 +-
 .../clientpositive/tez/stats_only_null.q.out    |    8 +-
 .../clientpositive/tez/subquery_in.q.out        |   36 +-
 .../results/clientpositive/tez/tez_dml.q.out    |    6 +-
 .../results/clientpositive/tez/union5.q.out     |   44 +-
 .../results/clientpositive/tez/union7.q.out     |   28 +-
 .../clientpositive/tez/unionDistinct_1.q.out    |    8 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |    4 +-
 .../tez/vector_binary_join_groupby.q.out        |    4 +-
 .../tez/vector_count_distinct.q.out             |    4 +-
 .../tez/vector_decimal_aggregate.q.out          |   12 +-
 .../tez/vector_decimal_precision.q.out          |    4 +-
 .../clientpositive/tez/vector_decimal_udf.q.out |   30 +-
 .../clientpositive/tez/vector_distinct_2.q.out  |   28 +-
 .../clientpositive/tez/vector_groupby_3.q.out   |   30 +-
 .../tez/vector_groupby_reduce.q.out             |    8 +-
 .../tez/vector_grouping_sets.q.out              |    8 +-
 .../tez/vector_mapjoin_reduce.q.out             |   36 +-
 .../clientpositive/tez/vector_orderby_5.q.out   |    6 +-
 .../clientpositive/tez/vector_outer_join2.q.out |   20 +-
 .../tez/vector_partition_diff_num_cols.q.out    |   20 +-
 .../tez/vector_partitioned_date_time.q.out      |   12 +-
 .../tez/vector_reduce_groupby_decimal.q.out     |   24 +-
 .../clientpositive/tez/vectorization_0.q.out    |   16 +-
 .../clientpositive/tez/vectorization_13.q.out   |   32 +-
 .../clientpositive/tez/vectorization_15.q.out   |   16 +-
 .../clientpositive/tez/vectorization_16.q.out   |   16 +-
 .../clientpositive/tez/vectorization_9.q.out    |   16 +-
 .../tez/vectorization_limit.q.out               |   14 +-
 .../tez/vectorization_pushdown.q.out            |    4 +-
 .../tez/vectorization_short_regress.q.out       |   74 +-
 .../tez/vectorized_distinct_gby.q.out           |    8 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   88 +-
 .../tez/vectorized_nested_mapjoin.q.out         |   18 +-
 .../clientpositive/tez/vectorized_parquet.q.out |    6 +-
 .../tez/vectorized_timestamp_funcs.q.out        |   12 +-
 ql/src/test/results/clientpositive/udf8.q.out   |    4 +-
 .../test/results/clientpositive/udf_count.q.out |   16 +-
 .../test/results/clientpositive/union11.q.out   |   70 +-
 .../test/results/clientpositive/union14.q.out   |   32 +-
 .../test/results/clientpositive/union15.q.out   |   38 +-
 .../test/results/clientpositive/union28.q.out   |    8 +-
 .../test/results/clientpositive/union30.q.out   |    8 +-
 .../test/results/clientpositive/union33.q.out   |    8 +-
 ql/src/test/results/clientpositive/union5.q.out |   48 +-
 ql/src/test/results/clientpositive/union7.q.out |   32 +-
 .../clientpositive/unionDistinct_1.q.out        |    8 +-
 .../clientpositive/union_remove_21.q.out        |    8 +-
 .../clientpositive/vector_aggregate_9.q.out     |    4 +-
 .../vector_aggregate_without_gby.q.out          |    4 +-
 .../vector_binary_join_groupby.q.out            |    4 +-
 .../clientpositive/vector_count_distinct.q.out  |    6 +-
 .../vector_decimal_aggregate.q.out              |   12 +-
 .../vector_decimal_precision.q.out              |    4 +-
 .../clientpositive/vector_decimal_udf.q.out     |   30 +-
 .../clientpositive/vector_distinct_2.q.out      |   28 +-
 .../clientpositive/vector_groupby_3.q.out       |   30 +-
 .../clientpositive/vector_groupby_reduce.q.out  |    8 +-
 .../clientpositive/vector_grouping_sets.q.out   |    8 +-
 .../clientpositive/vector_left_outer_join.q.out |    8 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |   36 +-
 .../clientpositive/vector_orderby_5.q.out       |    6 +-
 .../clientpositive/vector_outer_join1.q.out     |    8 +-
 .../clientpositive/vector_outer_join2.q.out     |   28 +-
 .../clientpositive/vector_outer_join3.q.out     |   24 +-
 .../clientpositive/vector_outer_join4.q.out     |    8 +-
 .../clientpositive/vector_outer_join5.q.out     |   48 +-
 .../vector_partition_diff_num_cols.q.out        |   20 +-
 .../vector_partitioned_date_time.q.out          |   12 +-
 .../vector_reduce_groupby_decimal.q.out         |   24 +-
 .../clientpositive/vectorization_0.q.out        |   16 +-
 .../clientpositive/vectorization_13.q.out       |   32 +-
 .../clientpositive/vectorization_15.q.out       |   16 +-
 .../clientpositive/vectorization_16.q.out       |   16 +-
 .../clientpositive/vectorization_9.q.out        |   16 +-
 .../clientpositive/vectorization_limit.q.out    |   16 +-
 .../clientpositive/vectorization_pushdown.q.out |    4 +-
 .../vectorization_short_regress.q.out           |   74 +-
 .../vectorized_distinct_gby.q.out               |   12 +-
 .../vectorized_nested_mapjoin.q.out             |   26 +-
 .../clientpositive/vectorized_parquet.q.out     |    6 +-
 .../vectorized_parquet_types.q.out              |    6 +-
 .../vectorized_timestamp_funcs.q.out            |   12 +-
 227 files changed, 4818 insertions(+), 5017 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
new file mode 100644
index 0000000..53f04ee
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveAggregateProjectMergeRule.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
+
+import org.apache.calcite.plan.RelOptRule;
+import org.apache.calcite.plan.RelOptRuleCall;
+import org.apache.calcite.plan.RelOptUtil;
+import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
+import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.util.ImmutableBitSet;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Planner rule that recognizes a {@link HiveAggregate}
+ * on top of a {@link HiveProject} and if possible
+ * aggregate through the project or removes the project.
+ *
+ * <p>This is only possible when the grouping expressions and arguments to
+ * the aggregate functions are field references (i.e. not expressions).
+ *
+ * <p>In some cases, this rule has the effect of trimming: the aggregate will
+ * use fewer columns than the project did.
+ */
+public class HiveAggregateProjectMergeRule extends RelOptRule {
+  public static final HiveAggregateProjectMergeRule INSTANCE =
+      new HiveAggregateProjectMergeRule();
+
+  /** Private constructor. */
+  private HiveAggregateProjectMergeRule() {
+    super(
+        operand(HiveAggregate.class,
+            operand(HiveProject.class, any())));
+  }
+
+  @Override
+  public void onMatch(RelOptRuleCall call) {
+    final HiveAggregate aggregate = call.rel(0);
+    final HiveProject project = call.rel(1);
+    RelNode x = apply(aggregate, project);
+    if (x != null) {
+      call.transformTo(x);
+    }
+  }
+
+  public static RelNode apply(HiveAggregate aggregate,
+      HiveProject project) {
+    final List<Integer> newKeys = Lists.newArrayList();
+    final Map<Integer, Integer> map = new HashMap<>();
+    for (int key : aggregate.getGroupSet()) {
+      final RexNode rex = project.getProjects().get(key);
+      if (rex instanceof RexInputRef) {
+        final int newKey = ((RexInputRef) rex).getIndex();
+        newKeys.add(newKey);
+        map.put(key, newKey);
+      } else {
+        // Cannot handle "GROUP BY expression"
+        return null;
+      }
+    }
+
+    final ImmutableBitSet newGroupSet = aggregate.getGroupSet().permute(map);
+    ImmutableList<ImmutableBitSet> newGroupingSets = null;
+    if (aggregate.indicator) {
+      newGroupingSets =
+          ImmutableBitSet.ORDERING.immutableSortedCopy(
+              ImmutableBitSet.permute(aggregate.getGroupSets(), map));
+    }
+
+    final ImmutableList.Builder<AggregateCall> aggCalls =
+        ImmutableList.builder();
+    for (AggregateCall aggregateCall : aggregate.getAggCallList()) {
+      final ImmutableList.Builder<Integer> newArgs = ImmutableList.builder();
+      for (int arg : aggregateCall.getArgList()) {
+        final RexNode rex = project.getProjects().get(arg);
+        if (rex instanceof RexInputRef) {
+          newArgs.add(((RexInputRef) rex).getIndex());
+        } else {
+          // Cannot handle "AGG(expression)"
+          return null;
+        }
+      }
+      final int newFilterArg;
+      if (aggregateCall.filterArg >= 0) {
+        final RexNode rex = project.getProjects().get(aggregateCall.filterArg);
+        if (!(rex instanceof RexInputRef)) {
+          return null;
+        }
+        newFilterArg = ((RexInputRef) rex).getIndex();
+      } else {
+        newFilterArg = -1;
+      }
+      aggCalls.add(aggregateCall.copy(newArgs.build(), newFilterArg));
+    }
+
+    final Aggregate newAggregate =
+        aggregate.copy(aggregate.getTraitSet(), project.getInput(),
+            aggregate.indicator, newGroupSet, newGroupingSets,
+            aggCalls.build());
+
+    // Add a project if the group set is not in the same order or
+    // contains duplicates.
+    RelNode rel = newAggregate;
+    if (!newKeys.equals(newGroupSet.asList())) {
+      final List<Integer> posList = Lists.newArrayList();
+      for (int newKey : newKeys) {
+        posList.add(newGroupSet.indexOf(newKey));
+      }
+      if (aggregate.indicator) {
+        for (int newKey : newKeys) {
+          posList.add(aggregate.getGroupCount() + newGroupSet.indexOf(newKey));
+        }
+      }
+      for (int i = newAggregate.getGroupCount()
+                   + newAggregate.getIndicatorCount();
+           i < newAggregate.getRowType().getFieldCount(); i++) {
+        posList.add(i);
+      }
+      rel = RelOptUtil.createProject(HiveProject.DEFAULT_PROJECT_FACTORY,
+          rel, posList);
+    }
+
+    return rel;
+  }
+}
+
+// End AggregateProjectMergeRule.java
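
As a rough illustration of what the new rule does, here is a stand-alone sketch (a simplified model, not the committed code, and independent of the Calcite API): a Project is represented as a list in which a non-negative entry means a direct reference to that input column and -1 stands for a computed expression. The Aggregate can only be pushed through the Project when every group key (and, in the real rule, every aggregate argument and filter argument) resolves to a direct reference; otherwise the plan is left unchanged, which is what the "return null" branches above do.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class AggregateProjectMergeSketch {

  // Remaps group-by keys through a projection. projects.get(i) is the input
  // column that projected column i reads from, or -1 if column i is a computed
  // expression (the case the rule cannot handle). Returns the keys expressed
  // against the projection's input, or null if any key hits an expression.
  static List<Integer> remapKeys(List<Integer> groupKeys, List<Integer> projects) {
    List<Integer> newKeys = new ArrayList<>();
    for (int key : groupKeys) {
      int source = projects.get(key);
      if (source < 0) {
        return null;        // "GROUP BY expression": bail out and keep the Project
      }
      newKeys.add(source);  // group directly on the underlying input column
    }
    return newKeys;
  }

  public static void main(String[] args) {
    // The Project emits: col0 = input#2, col1 = input#0, col2 = some expression.
    List<Integer> projects = Arrays.asList(2, 0, -1);
    System.out.println(remapKeys(Arrays.asList(0, 1), projects)); // [2, 0]: merge succeeds
    System.out.println(remapKeys(Arrays.asList(0, 2), projects)); // null: rule does not fire
  }
}

This is also why the q.out diffs further down replace generated keys such as _col0 and _col1 with the underlying column names (state, locid): once the intermediate Select is merged away, the Group By keys refer straight to the table columns.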

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
index 4144674..a12fa2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelFieldTrimmer.java
@@ -24,11 +24,10 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.calcite.plan.RelOptUtil;
-import org.apache.calcite.rel.RelCollation;
-import org.apache.calcite.rel.RelFieldCollation;
 import org.apache.calcite.rel.RelNode;
+import org.apache.calcite.rel.core.Aggregate;
+import org.apache.calcite.rel.core.AggregateCall;
 import org.apache.calcite.rel.core.RelFactories;
-import org.apache.calcite.rel.metadata.RelMetadataQuery;
 import org.apache.calcite.rel.type.RelDataType;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexNode;
@@ -37,20 +36,19 @@ import org.apache.calcite.rex.RexVisitor;
 import org.apache.calcite.sql.validate.SqlValidator;
 import org.apache.calcite.sql2rel.RelFieldTrimmer;
 import org.apache.calcite.util.ImmutableBitSet;
-import org.apache.calcite.util.Util;
 import org.apache.calcite.util.mapping.IntPair;
 import org.apache.calcite.util.mapping.Mapping;
 import org.apache.calcite.util.mapping.MappingType;
 import org.apache.calcite.util.mapping.Mappings;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin;
 
+import com.google.common.base.Function;
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
 
 public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
-  public HiveRelFieldTrimmer(SqlValidator validator) {
-    super(validator);
-  }
+  private final RelFactories.AggregateFactory aggregateFactory;
 
   public HiveRelFieldTrimmer(SqlValidator validator,
       RelFactories.ProjectFactory projectFactory,
@@ -62,6 +60,7 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
       RelFactories.SetOpFactory setOpFactory) {
     super(validator, projectFactory, filterFactory, joinFactory,
             semiJoinFactory, sortFactory, aggregateFactory, setOpFactory);
+    this.aggregateFactory = aggregateFactory;
   }
 
   /**
@@ -156,27 +155,127 @@ public class HiveRelFieldTrimmer extends RelFieldTrimmer {
 
     return new TrimResult(newJoin, mapping);
   }
-
-  protected TrimResult trimChild(
-      RelNode rel,
-      RelNode input,
+  /**
+   * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
+   * {@link org.apache.calcite.rel.logical.LogicalAggregate}.
+   */
+  @Override
+  public TrimResult trimFields(
+      Aggregate aggregate,
       ImmutableBitSet fieldsUsed,
       Set<RelDataTypeField> extraFields) {
-    Util.discard(rel);
-    if (input.getClass().getName().endsWith("MedMdrClassExtentRel")) {
-      // MedMdrJoinRule cannot handle Join of Project of
-      // MedMdrClassExtentRel, only naked MedMdrClassExtentRel.
-      // So, disable trimming.
-      fieldsUsed = ImmutableBitSet.range(input.getRowType().getFieldCount());
+    // Fields:
+    //
+    // | sys fields | group fields | indicator fields | agg functions |
+    //
+    // Two kinds of trimming:
+    //
+    // 1. If agg rel has system fields but none of these are used, create an
+    // agg rel with no system fields.
+    //
+    // 2. If aggregate functions are not used, remove them.
+    //
+    // But group and indicator fields stay, even if they are not used.
+
+    final RelDataType rowType = aggregate.getRowType();
+
+    // Compute which input fields are used.
+    // 1. group fields are always used
+    final ImmutableBitSet.Builder inputFieldsUsed =
+        ImmutableBitSet.builder(aggregate.getGroupSet());
+    // 2. agg functions
+    for (AggregateCall aggCall : aggregate.getAggCallList()) {
+      for (int i : aggCall.getArgList()) {
+        inputFieldsUsed.set(i);
+      }
+      if (aggCall.filterArg >= 0) {
+        inputFieldsUsed.set(aggCall.filterArg);
+      }
+    }
+
+    // Create input with trimmed columns.
+    final RelNode input = aggregate.getInput();
+    final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
+    final TrimResult trimResult =
+        trimChild(aggregate, input, inputFieldsUsed.build(), inputExtraFields);
+    final RelNode newInput = trimResult.left;
+    final Mapping inputMapping = trimResult.right;
+
+    // We have to return group keys and (if present) indicators.
+    // So, pretend that the consumer asked for them.
+    final int groupCount = aggregate.getGroupSet().cardinality();
+    final int indicatorCount = aggregate.getIndicatorCount();
+    fieldsUsed =
+        fieldsUsed.union(ImmutableBitSet.range(groupCount + indicatorCount));
+
+    // If the input is unchanged, and we need to project all columns,
+    // there's nothing to do.
+    if (input == newInput
+        && fieldsUsed.equals(ImmutableBitSet.range(rowType.getFieldCount()))) {
+      return new TrimResult(
+          aggregate,
+          Mappings.createIdentity(rowType.getFieldCount()));
     }
-    final ImmutableList<RelCollation> collations =
-        RelMetadataQuery.collations(input);
-    for (RelCollation collation : collations) {
-      for (RelFieldCollation fieldCollation : collation.getFieldCollations()) {
-        fieldsUsed = fieldsUsed.set(fieldCollation.getFieldIndex());
+
+    // Which agg calls are used by our consumer?
+    int j = groupCount + indicatorCount;
+    int usedAggCallCount = 0;
+    for (int i = 0; i < aggregate.getAggCallList().size(); i++) {
+      if (fieldsUsed.get(j++)) {
+        ++usedAggCallCount;
       }
     }
-    return dispatchTrimFields(input, fieldsUsed, extraFields);
+
+    // Offset due to the number of system fields having changed.
+    Mapping mapping =
+        Mappings.create(
+            MappingType.INVERSE_SURJECTION,
+            rowType.getFieldCount(),
+            groupCount + indicatorCount + usedAggCallCount);
+
+    final ImmutableBitSet newGroupSet =
+        Mappings.apply(inputMapping, aggregate.getGroupSet());
+
+    final ImmutableList<ImmutableBitSet> newGroupSets =
+        ImmutableList.copyOf(
+            Iterables.transform(aggregate.getGroupSets(),
+                new Function<ImmutableBitSet, ImmutableBitSet>() {
+                  @Override
+                  public ImmutableBitSet apply(ImmutableBitSet input) {
+                    return Mappings.apply(inputMapping, input);
+                  }
+                }));
+
+    // Populate mapping of where to find the fields. System, group key and
+    // indicator fields first.
+    for (j = 0; j < groupCount + indicatorCount; j++) {
+      mapping.set(j, j);
+    }
+
+    // Now create new agg calls, and populate mapping for them.
+    final List<AggregateCall> newAggCallList = new ArrayList<>();
+    j = groupCount + indicatorCount;
+    for (AggregateCall aggCall : aggregate.getAggCallList()) {
+      if (fieldsUsed.get(j)) {
+        AggregateCall newAggCall =
+            aggCall.copy(Mappings.apply2(inputMapping, aggCall.getArgList()),
+                Mappings.apply(inputMapping, aggCall.filterArg));
+        if (newAggCall.equals(aggCall)) {
+          newAggCall = aggCall; // immutable -> canonize to save space
+        }
+        mapping.set(j, groupCount + indicatorCount + newAggCallList.size());
+        newAggCallList.add(newAggCall);
+      }
+      ++j;
+    }
+
+    RelNode newAggregate = aggregateFactory.createAggregate(newInput,
+        aggregate.indicator, newGroupSet, newGroupSets, newAggCallList);
+
+    assert newAggregate.getClass() == aggregate.getClass();
+
+    return new TrimResult(newAggregate, mapping);
   }
 
+
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
index 67f17c2..16a375c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
@@ -265,8 +265,8 @@ public class PlanModifierForASTConv {
 
     // TODO: Verify GB having is not a separate filter (if so we shouldn't
     // introduce derived table)
-    if (parent instanceof Filter || parent instanceof Join
-        || parent instanceof SetOp) {
+    if (parent instanceof Filter || parent instanceof Join || parent instanceof SetOp ||
+       (parent instanceof Aggregate && filterNode.getInputs().get(0) instanceof Aggregate)) {
       validParent = false;
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index d5c747f..0a7ce3a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -134,6 +134,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion;
+import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveExpandDistinctAggregatesRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterJoinRule;
 import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveFilterProjectTransposeRule;
@@ -883,6 +884,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
       hepPgmBldr.addRuleInstance(ProjectRemoveRule.INSTANCE);
       hepPgmBldr.addRuleInstance(UnionMergeRule.INSTANCE);
       hepPgmBldr.addRuleInstance(new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));
+      hepPgmBldr.addRuleInstance(HiveAggregateProjectMergeRule.INSTANCE);
 
       hepPgm = hepPgmBldr.build();
       HepPlanner hepPlanner = new HepPlanner(hepPgm);
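
For context, the program extended here is an ordinary Calcite HepProgram, and the position of the new rule matters: ProjectMergeRule runs earlier in the sequence and collapses stacked HiveProjects, so by the time HiveAggregateProjectMergeRule fires the Aggregate typically sits directly on a single Project. The sketch below mirrors the builder pattern from this hunk as a self-contained method; the wrapper class name, the basePlan parameter and the setRoot/findBestExp driver calls are assumptions based on standard HepPlanner usage and do not appear in this hunk.

import org.apache.calcite.plan.hep.HepPlanner;
import org.apache.calcite.plan.hep.HepProgramBuilder;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.rules.ProjectMergeRule;
import org.apache.calcite.rel.rules.ProjectRemoveRule;
import org.apache.calcite.rel.rules.UnionMergeRule;
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject;
import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveAggregateProjectMergeRule;

public final class PreJoinOrderingSketch {

  // Applies the same rule sequence as the hunk above to an arbitrary plan root.
  public static RelNode run(RelNode basePlan) {
    HepProgramBuilder builder = new HepProgramBuilder();
    builder.addRuleInstance(ProjectRemoveRule.INSTANCE);
    builder.addRuleInstance(UnionMergeRule.INSTANCE);
    builder.addRuleInstance(new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));
    builder.addRuleInstance(HiveAggregateProjectMergeRule.INSTANCE); // new in this patch
    HepPlanner planner = new HepPlanner(builder.build());
    planner.setRoot(basePlan);
    return planner.findBestExp();
  }
}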

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
index 9fc3c8d..a42b464 100644
--- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
+++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
@@ -232,8 +232,8 @@ STAGE PLANS:
               name: default.alter_coltype
             name: default.alter_coltype
       Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:alter_coltype]
-        /alter_coltype/dt=100/ts=6.30 [$hdt$_0:alter_coltype]
+        /alter_coltype/dt=100/ts=3.0 [alter_coltype]
+        /alter_coltype/dt=100/ts=6.30 [alter_coltype]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -411,7 +411,7 @@ STAGE PLANS:
               name: default.alter_coltype
             name: default.alter_coltype
       Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=6.30 [$hdt$_0:alter_coltype]
+        /alter_coltype/dt=100/ts=6.30 [alter_coltype]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -568,7 +568,7 @@ STAGE PLANS:
               name: default.alter_coltype
             name: default.alter_coltype
       Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:alter_coltype]
+        /alter_coltype/dt=100/ts=3.0 [alter_coltype]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
index 82cc0da..1b9ec68 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out
@@ -157,11 +157,11 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string), _col1 (type: int)
+                keys: state (type: string), locid (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 752 Basic stats: COMPLETE Column stats: PARTIAL
@@ -178,22 +178,18 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 7 Data size: 658 Basic stats: COMPLETE Column stats: PARTIAL
-          Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint), _col1 (type: int)
+          Group By Operator
+            aggregations: min(_col1)
+            keys: _col0 (type: string), _col2 (type: bigint)
+            mode: hash
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 7 Data size: 658 Basic stats: COMPLETE Column stats: PARTIAL
-            Group By Operator
-              aggregations: min(_col2)
-              keys: _col0 (type: string), _col1 (type: bigint)
-              mode: hash
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            Statistics: Num rows: 7 Data size: 686 Basic stats: COMPLETE Column stats: PARTIAL
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -255,10 +251,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: year (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: year
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: year (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
@@ -308,10 +304,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int)
+                keys: state (type: string), locid (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE
@@ -361,10 +357,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
@@ -415,10 +411,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
@@ -469,10 +465,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE
@@ -523,10 +519,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
@@ -577,10 +573,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE
@@ -631,10 +627,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE
@@ -689,10 +685,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: year (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: year
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: year (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
@@ -742,10 +738,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE
@@ -798,10 +794,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), zip (type: bigint)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, zip
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: bigint)
+                keys: state (type: string), zip (type: bigint)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL
@@ -851,10 +847,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
@@ -905,10 +901,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
@@ -959,10 +955,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
@@ -1013,10 +1009,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE
@@ -1067,10 +1063,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE
@@ -1121,10 +1117,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE
@@ -1175,10 +1171,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: year (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: year
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: year (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
@@ -1228,10 +1224,10 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, locid
               Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: int), '0' (type: string)
+                keys: state (type: string), locid (type: int), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
index 2cb1e84..be3fa1d 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out
@@ -91,10 +91,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: state (type: string), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
@@ -144,10 +144,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                keys: state (type: string), country (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 80 Data size: 800 Basic stats: COMPLETE Column stats: NONE
@@ -202,10 +202,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: state (type: string), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE
@@ -257,10 +257,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
             Select Operator
               expressions: state (type: string), votes (type: bigint)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, votes
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: bigint)
+                keys: state (type: string), votes (type: bigint)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL
@@ -310,10 +310,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                keys: state (type: string), country (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 32 Data size: 8256 Basic stats: COMPLETE Column stats: COMPLETE
@@ -364,10 +364,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: state (type: string), country (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE
@@ -417,10 +417,10 @@ STAGE PLANS:
             Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: state (type: string), country (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: state, country
               Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                keys: state (type: string), country (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 80 Data size: 20640 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/auto_join18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join18.q.out b/ql/src/test/results/clientpositive/auto_join18.q.out
index 6dc7a63..7fd7dd1 100644
--- a/ql/src/test/results/clientpositive/auto_join18.q.out
+++ b/ql/src/test/results/clientpositive/auto_join18.q.out
@@ -40,11 +40,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -140,11 +140,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT value)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out
index cc17ad1..0a9dd76 100644
--- a/ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/auto_join18_multi_distinct.q.out
@@ -42,11 +42,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -142,11 +142,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1), count(DISTINCT _col0)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT value), count(DISTINCT key)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/auto_join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join27.q.out b/ql/src/test/results/clientpositive/auto_join27.q.out
index 16a7f02..9c03c78 100644
--- a/ql/src/test/results/clientpositive/auto_join27.q.out
+++ b/ql/src/test/results/clientpositive/auto_join27.q.out
@@ -39,20 +39,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) < 200.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/auto_join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/auto_join32.q.out b/ql/src/test/results/clientpositive/auto_join32.q.out
index f862870..161ab6b 100644
--- a/ql/src/test/results/clientpositive/auto_join32.q.out
+++ b/ql/src/test/results/clientpositive/auto_join32.q.out
@@ -411,10 +411,10 @@ STAGE PLANS:
                   outputColumnNames: _col1, _col3
                   Select Operator
                     expressions: _col3 (type: string), _col1 (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: _col3, _col1
                     Group By Operator
                       aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: _col3 (type: string), _col1 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/binarysortable_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/binarysortable_1.q.out b/ql/src/test/results/clientpositive/binarysortable_1.q.out
index 421fd2e..9ef9221 100644
Binary files a/ql/src/test/results/clientpositive/binarysortable_1.q.out and b/ql/src/test/results/clientpositive/binarysortable_1.q.out differ

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer2.q.out b/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
index c1a20c8..96c7660 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer2.q.out
@@ -41,22 +41,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -144,22 +140,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -228,44 +220,36 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
           TableScan
             alias: y
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Demux Operator
           Statistics: Num rows: 263 Data size: 2755 Basic stats: COMPLETE Column stats: NONE
@@ -411,11 +395,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -511,11 +495,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -592,11 +576,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -611,11 +595,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -770,11 +754,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -870,11 +854,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -951,11 +935,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -970,11 +954,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1129,11 +1113,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1229,11 +1213,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1310,11 +1294,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1329,11 +1313,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1489,10 +1473,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1614,10 +1598,10 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1703,10 +1687,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1720,10 +1704,10 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -2029,22 +2013,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -2144,22 +2124,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Demux Operator
           Statistics: Num rows: 276 Data size: 2854 Basic stats: COMPLETE Column stats: NONE


[12/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
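Note (editor's illustration, not part of the commit): the regenerated .q.out plan diffs in this part all show the same effect of the new rule. When a Select Operator only forwards table columns, the Group By Operator above it is rewritten to reference those columns directly (e.g. "keys: key" instead of "keys: _col0") and the redundant Select is dropped, sometimes letting the Group By sit directly under the Filter Operator. The sketch below is a minimal, hypothetical illustration of that project-into-aggregate merge; it is not the actual Hive/Calcite implementation, and every class and method name in it is made up for the example. It assumes the Project does nothing but forward input columns.

    // Simplified, hypothetical sketch of the idea behind AggregateProjectMergeRule:
    // remap an Aggregate's group keys and aggregate arguments through a trivial
    // column-forwarding Project, then drop the Project. Names are illustrative only.
    import java.util.ArrayList;
    import java.util.List;

    final class ProjectNode {
        final List<Integer> sourceColumns;   // i-th output column = input column sourceColumns.get(i)
        final Object input;                  // child relational node (e.g. a table scan)
        ProjectNode(List<Integer> sourceColumns, Object input) {
            this.sourceColumns = sourceColumns;
            this.input = input;
        }
    }

    final class AggregateNode {
        final List<Integer> groupKeys;       // indexes into the child's output columns
        final List<Integer> aggArgs;         // argument column indexes of the aggregate calls
        final Object input;
        AggregateNode(List<Integer> groupKeys, List<Integer> aggArgs, Object input) {
            this.groupKeys = groupKeys;
            this.aggArgs = aggArgs;
            this.input = input;
        }
    }

    final class AggregateProjectMergeSketch {
        // Rewrites the Aggregate against the Project's input, assuming the Project
        // only forwards columns (no expressions), so the Project can be eliminated.
        static AggregateNode merge(AggregateNode agg, ProjectNode project) {
            List<Integer> newKeys = new ArrayList<>();
            for (int key : agg.groupKeys) {
                newKeys.add(project.sourceColumns.get(key));   // _colN -> underlying column
            }
            List<Integer> newArgs = new ArrayList<>();
            for (int arg : agg.aggArgs) {
                newArgs.add(project.sourceColumns.get(arg));
            }
            return new AggregateNode(newKeys, newArgs, project.input);
        }

        public static void main(String[] args) {
            // Project forwards table columns (key, value) as (_col0, _col1).
            ProjectNode select = new ProjectNode(List.of(0, 1), "TableScan(src)");
            // GROUP BY _col0 with count(_col1).
            AggregateNode groupBy = new AggregateNode(List.of(0), List.of(1), select);
            AggregateNode merged = merge(groupBy, select);
            // The merged Aggregate groups by table column 0 (key) and aggregates
            // column 1 (value) directly over the scan -- the Select disappears,
            // which is exactly the pattern visible in the plan diffs below.
            System.out.println(merged.groupKeys + " " + merged.aggArgs);
        }
    }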
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index c649883..236fa18 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -193,14 +193,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4182 Basic stats: COMPLETE Column stats: NONE
@@ -212,7 +212,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4182 Basic stats: COMPLETE Column stats: NONE
@@ -251,19 +251,15 @@ STAGE PLANS:
                     1 _col0 (type: bigint)
                   outputColumnNames: _col1
                   Statistics: Num rows: 24 Data size: 5060 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: _col1 (type: bigint)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 24 Data size: 5060 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      aggregations: count(), sum(_col0)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                  Group By Operator
+                    aggregations: count(), sum(_col1)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
                       Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: bigint), _col1 (type: bigint)
+                      value expressions: _col0 (type: bigint), _col1 (type: bigint)
       Local Work:
         Map Reduce Local Work
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 502f2d3..821a642 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -193,14 +193,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE
@@ -212,7 +212,7 @@ STAGE PLANS:
                 keys:
                   0 _col0 (type: int)
                   1 _col0 (type: int)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE
@@ -334,14 +334,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE
@@ -353,7 +353,7 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: string)
                   1 _col0 (type: string)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE
@@ -475,14 +475,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE
@@ -494,7 +494,7 @@ STAGE PLANS:
                 keys:
                   0 _col3 (type: string), _col1 (type: bigint)
                   1 _col1 (type: string), _col0 (type: bigint)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 20 Data size: 4400 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index 50ae497..fad7685 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -859,14 +859,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE
@@ -878,7 +878,7 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 30 Data size: 6680 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
index bbe8ba1..2cb80b6 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
@@ -85,11 +85,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:st 
+        $hdt$_1:st 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:st 
+        $hdt$_1:st 
           TableScan
             alias: st
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -193,11 +193,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -307,11 +307,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -421,11 +421,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -539,14 +539,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:s 
+        $hdt$_2:s 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -558,7 +558,7 @@ STAGE PLANS:
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
                   1 (_col0 pmod UDFToLong(8)) (type: bigint)
-        $hdt$_0:$hdt$_2:s 
+        $hdt$_2:s 
           TableScan
             alias: s
             Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: NONE
@@ -732,11 +732,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:st 
+        $hdt$_1:st 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:st 
+        $hdt$_1:st 
           TableScan
             alias: st
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -840,11 +840,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -954,11 +954,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -1068,11 +1068,11 @@ STAGE PLANS:
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -1186,14 +1186,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:s 
+        $hdt$_2:s 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:sm 
+        $hdt$_1:sm 
           TableScan
             alias: sm
             Statistics: Num rows: 100 Data size: 392 Basic stats: COMPLETE Column stats: NONE
@@ -1205,7 +1205,7 @@ STAGE PLANS:
                 keys:
                   0 UDFToLong(_col1) (type: bigint)
                   1 (_col0 pmod UDFToLong(8)) (type: bigint)
-        $hdt$_0:$hdt$_2:s 
+        $hdt$_2:s 
           TableScan
             alias: s
             Statistics: Num rows: 6058 Data size: 48464 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_partition_diff_num_cols.q.out b/ql/src/test/results/clientpositive/vector_partition_diff_num_cols.q.out
index 0437ff6..f483614 100644
--- a/ql/src/test/results/clientpositive/vector_partition_diff_num_cols.q.out
+++ b/ql/src/test/results/clientpositive/vector_partition_diff_num_cols.q.out
@@ -95,10 +95,10 @@ STAGE PLANS:
             Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: inv_quantity_on_hand (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: inv_quantity_on_hand
               Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(inv_quantity_on_hand)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -214,10 +214,10 @@ STAGE PLANS:
             Statistics: Num rows: 200 Data size: 11876 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: inv_quantity_on_hand (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: inv_quantity_on_hand
               Statistics: Num rows: 200 Data size: 11876 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(inv_quantity_on_hand)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -334,10 +334,10 @@ STAGE PLANS:
             Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: inv_quantity_on_hand (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: inv_quantity_on_hand
               Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(inv_quantity_on_hand)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -435,10 +435,10 @@ STAGE PLANS:
             Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: inv_quantity_on_hand (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: inv_quantity_on_hand
               Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(inv_quantity_on_hand)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -542,10 +542,10 @@ STAGE PLANS:
             Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: inv_quantity_on_hand (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: inv_quantity_on_hand
               Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(inv_quantity_on_hand)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
index 83e7f19..a2762a3 100644
--- a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
@@ -376,11 +376,11 @@ STAGE PLANS:
             Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: fl_date (type: date)
-              outputColumnNames: _col0
+              outputColumnNames: fl_date
               Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: date)
+                keys: fl_date (type: date)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
@@ -1153,11 +1153,11 @@ STAGE PLANS:
             Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: fl_date (type: date)
-              outputColumnNames: _col0
+              outputColumnNames: fl_date
               Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: date)
+                keys: fl_date (type: date)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
@@ -1954,11 +1954,11 @@ STAGE PLANS:
             Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: fl_time (type: timestamp)
-              outputColumnNames: _col0
+              outputColumnNames: fl_time
               Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: timestamp)
+                keys: fl_time (type: timestamp)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index b38add3..77dc175 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -39,22 +39,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (cdecimal1 is not null and cdecimal2 is not null) (type: boolean)
               Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                outputColumnNames: _col0, _col1, _col2, _col3
+              Group By Operator
+                aggregations: min(cdecimal1)
+                keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: min(_col2)
-                  keys: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
+                  sort order: ++++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                   Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                    sort order: ++++
-                    Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                    Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col4 (type: decimal(20,10))
+                  value expressions: _col4 (type: decimal(20,10))
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out
index 89163cd..3671055 100644
--- a/ql/src/test/results/clientpositive/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_0.q.out
@@ -130,10 +130,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint)
-              outputColumnNames: _col0
+              outputColumnNames: ctinyint
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(ctinyint)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -443,10 +443,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cbigint (type: bigint)
-              outputColumnNames: _col0
+              outputColumnNames: cbigint
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(cbigint)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -756,10 +756,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cfloat (type: float)
-              outputColumnNames: _col0
+              outputColumnNames: cfloat
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(cfloat)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -1014,10 +1014,10 @@ STAGE PLANS:
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: cbigint, cfloat, ctinyint
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: avg(_col0), stddev_pop(_col0), var_samp(_col0), count(), sum(_col1), min(_col2)
+                  aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
index 95cb09a..38966d1 100644
--- a/ql/src/test/results/clientpositive/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
@@ -84,31 +84,31 @@ STAGE PLANS:
               predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                  keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                  aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                  keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                   Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                    key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                     sort order: +++++
-                    Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                    Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-          keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
           Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+            expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -340,31 +340,31 @@ STAGE PLANS:
               predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
               Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                 Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                  keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                  aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                  keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                   Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                    key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                     sort order: +++++
-                    Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                    Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-          keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
           Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+            expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
index da0e8e0..be06613 100644
--- a/ql/src/test/results/clientpositive/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
@@ -80,31 +80,31 @@ STAGE PLANS:
               predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                outputColumnNames: ctinyint, cint, cfloat, cdouble, cstring1, ctimestamp1, cboolean1
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_samp(_col0), min(_col2), stddev_samp(_col4), var_pop(_col4), var_samp(_col5), stddev_pop(_col5)
-                  keys: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                  aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                  keys: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                    key expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                     sort order: +++++++
-                    Map-reduce partition columns: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                    Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
-          keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: int), KEY._col2 (type: float), KEY._col3 (type: double), KEY._col4 (type: string), KEY._col5 (type: timestamp), KEY._col6 (type: boolean)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col5)) (type: double), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - UDFToDouble(_col5))) (type: double), _col12 (type: double)
+            expressions: _col2 (type: float), _col6 (type: boolean), _col3 (type: double), _col4 (type: string), _col0 (type: tinyint), _col1 (type: int), _col5 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col1)) (type: double), _col8 (type: double), (_col3 * 79.553) (type: double), (33.0 % _col2) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col3) (type: double), (- _col0) (type: tinyint), _col11 (type: double), (UDFToFloat(_col1) - _col2) (type: float), (-23 % UDFToInteger(_col0)) (type: int), (- (-26.28 - UDFToDouble(_col1))) (type: double), _col12 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
             Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_16.q.out b/ql/src/test/results/clientpositive/vectorization_16.q.out
index 6ae3b4e..6ee9957 100644
--- a/ql/src/test/results/clientpositive/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_16.q.out
@@ -57,31 +57,31 @@ STAGE PLANS:
               predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
               Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                outputColumnNames: _col0, _col1, _col2
+                expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                outputColumnNames: cdouble, cstring1, ctimestamp1
                 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                  keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                     sort order: +++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+          keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+            expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
             Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_9.q.out b/ql/src/test/results/clientpositive/vectorization_9.q.out
index 6ae3b4e..6ee9957 100644
--- a/ql/src/test/results/clientpositive/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_9.q.out
@@ -57,31 +57,31 @@ STAGE PLANS:
               predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
               Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                outputColumnNames: _col0, _col1, _col2
+                expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                outputColumnNames: cdouble, cstring1, ctimestamp1
                 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                  keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                  aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                  keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                     sort order: +++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                    Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-          keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+          keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
           Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+            expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
             Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
             File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out
index 7691a4d..9ff888c 100644
--- a/ql/src/test/results/clientpositive/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out
@@ -250,10 +250,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint)
-              outputColumnNames: _col0
+              outputColumnNames: ctinyint
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: tinyint)
+                keys: ctinyint (type: tinyint)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -334,11 +334,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cdouble (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ctinyint, cdouble
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: tinyint), _col1 (type: double)
+                aggregations: count(DISTINCT cdouble)
+                keys: ctinyint (type: tinyint), cdouble (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -452,11 +452,11 @@ STAGE PLANS:
               Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cdouble (type: double), ctinyint (type: tinyint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: cdouble, ctinyint
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: sum(_col1)
-                  keys: _col0 (type: double)
+                  aggregations: sum(ctinyint)
+                  keys: cdouble (type: double)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
index 7205376..3aff6a3 100644
--- a/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_pushdown.q.out
@@ -20,10 +20,10 @@ STAGE PLANS:
               Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint)
-                outputColumnNames: _col0
+                outputColumnNames: cbigint
                 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: avg(_col0)
+                  aggregations: avg(cbigint)
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
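
The hunk above is a straightforward one: an ungrouped AVG over a single bigint column. For orientation, a minimal HiveQL sketch of a query with this plan shape is given below; the table name and predicate are illustrative assumptions, not the literal contents of vectorization_pushdown.q.

    -- Hypothetical query shape only; the real test query lives in
    -- ql/src/test/queries/clientpositive/vectorization_pushdown.q
    set hive.vectorized.execution.enabled=true;
    explain
    select avg(cbigint)     -- single hash-mode aggregation, no GROUP BY keys
    from alltypesorc        -- assumed name of the ORC test table used by these suites
    where cbigint < 100;    -- assumed predicate, present only so the plan has a Filter stage

The only change in this hunk is that the map-side Select Operator now forwards the source column name (cbigint) into the Group By Operator instead of the positional alias _col0.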

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index 728f628..570e649 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -151,10 +151,10 @@ STAGE PLANS:
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                outputColumnNames: cint, cdouble, csmallint, cfloat, ctinyint
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: avg(_col0), sum(_col1), stddev_pop(_col0), stddev_samp(_col2), var_samp(_col0), avg(_col3), stddev_samp(_col0), min(_col4), count(_col2)
+                  aggregations: avg(cint), sum(cdouble), stddev_pop(cint), stddev_samp(csmallint), var_samp(cint), avg(cfloat), stddev_samp(cint), min(ctinyint), count(csmallint)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                   Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
@@ -357,10 +357,10 @@ STAGE PLANS:
               Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                outputColumnNames: cint, cbigint, csmallint, cdouble, ctinyint
                 Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col0), var_pop(_col1), stddev_pop(_col2), max(_col3), avg(_col4), min(_col0), min(_col3), stddev_samp(_col2), var_samp(_col0)
+                  aggregations: max(cint), var_pop(cbigint), stddev_pop(csmallint), max(cdouble), avg(ctinyint), min(cint), min(cdouble), stddev_samp(csmallint), var_samp(cint)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -554,10 +554,10 @@ STAGE PLANS:
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                outputColumnNames: cbigint, ctinyint, csmallint, cint, cdouble
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: var_pop(_col0), count(), max(_col1), stddev_pop(_col2), max(_col3), stddev_samp(_col4), count(_col1), avg(_col1)
+                  aggregations: var_pop(cbigint), count(), max(ctinyint), stddev_pop(csmallint), max(cint), stddev_samp(cdouble), count(ctinyint), avg(ctinyint)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                   Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -730,10 +730,10 @@ STAGE PLANS:
               Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
-                outputColumnNames: _col0, _col1, _col2, _col3
+                outputColumnNames: ctinyint, cbigint, cint, cfloat
                 Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: avg(_col0), max(_col1), stddev_samp(_col2), var_pop(_col2), var_pop(_col1), max(_col3)
+                  aggregations: avg(ctinyint), max(cbigint), stddev_samp(cint), var_pop(cint), var_pop(cbigint), max(cfloat)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                   Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -1825,11 +1825,11 @@ STAGE PLANS:
               Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: csmallint (type: smallint), cbigint (type: bigint), ctinyint (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2
+                outputColumnNames: csmallint, cbigint, ctinyint
                 Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_samp(_col0), sum(_col1), var_pop(_col2), count()
-                  keys: _col0 (type: smallint)
+                  aggregations: stddev_samp(csmallint), sum(cbigint), var_pop(ctinyint), count()
+                  keys: csmallint (type: smallint)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
@@ -2035,11 +2035,11 @@ STAGE PLANS:
               Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cdouble (type: double), cfloat (type: float)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: cdouble, cfloat
                 Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
-                  keys: _col0 (type: double)
+                  aggregations: var_samp(cdouble), count(cfloat), sum(cfloat), var_pop(cdouble), stddev_pop(cdouble), sum(cdouble)
+                  keys: cdouble (type: double)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                   Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
@@ -2292,31 +2292,31 @@ STAGE PLANS:
               predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
+                outputColumnNames: cstring1, ctimestamp1, cint, csmallint, ctinyint, cfloat, cdouble
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: stddev_pop(_col2), avg(_col3), count(), min(_col4), var_samp(_col3), var_pop(_col5), avg(_col2), var_samp(_col5), avg(_col5), min(_col6), var_pop(_col3), stddev_pop(_col4), sum(_col2)
-                  keys: _col0 (type: timestamp), _col1 (type: string)
+                  aggregations: stddev_pop(cint), avg(csmallint), count(), min(ctinyint), var_samp(csmallint), var_pop(cfloat), avg(cint), var_samp(cfloat), avg(cfloat), min(cdouble), var_pop(csmallint), stddev_pop(ctinyint), sum(cint)
+                  keys: cstring1 (type: string), ctimestamp1 (type: timestamp)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
-                    key expressions: _col0 (type: timestamp), _col1 (type: string)
+                    key expressions: _col0 (type: string), _col1 (type: timestamp)
                     sort order: ++
-                    Map-reduce partition columns: _col0 (type: timestamp), _col1 (type: string)
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,input:smallint>), _col4 (type: bigint), _col5 (type: tinyint), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,input:float>), _col11 (type: double), _col12 (type: struct<count:bigint,sum:double,variance:double>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: bigint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: stddev_pop(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), min(VALUE._col3), var_samp(VALUE._col4), var_pop(VALUE._col5), avg(VALUE._col6), var_samp(VALUE._col7), avg(VALUE._col8), min(VALUE._col9), var_pop(VALUE._col10), stddev_pop(VALUE._col11), sum(VALUE._col12)
-          keys: KEY._col0 (type: timestamp), KEY._col1 (type: string)
+          keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
           Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2
 )) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
+            expressions: _col1 (type: timestamp), _col0 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2
 )) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
             outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38
             Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
@@ -2628,11 +2628,11 @@ STAGE PLANS:
               Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                outputColumnNames: cboolean1, cfloat, cbigint, cint, cdouble, ctinyint, csmallint
                 Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col1), sum(_col2), var_samp(_col3), avg(_col4), min(_col2), var_pop(_col2), sum(_col3), stddev_samp(_col5), stddev_pop(_col6), avg(_col3)
-                  keys: _col0 (type: boolean)
+                  aggregations: max(cfloat), sum(cbigint), var_samp(cint), avg(cdouble), min(cbigint), var_pop(cbigint), sum(cint), stddev_samp(ctinyint), stddev_pop(csmallint), avg(cint)
+                  keys: cboolean1 (type: boolean)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                   Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
@@ -2863,10 +2863,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: i (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: i
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(i)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3027,10 +3027,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint)
-              outputColumnNames: _col0
+              outputColumnNames: ctinyint
               Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(ctinyint)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3087,10 +3087,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cint (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: cint
               Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(cint)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3147,10 +3147,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cfloat (type: float)
-              outputColumnNames: _col0
+              outputColumnNames: cfloat
               Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(cfloat)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3207,10 +3207,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cstring1 (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: cstring1
               Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(cstring1)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3267,10 +3267,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cboolean1 (type: boolean)
-              outputColumnNames: _col0
+              outputColumnNames: cboolean1
               Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(cboolean1)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
index 2e041a3..b5c667f 100644
--- a/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_distinct_gby.q.out
@@ -33,12 +33,12 @@ STAGE PLANS:
             Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: a
               Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(DISTINCT _col0), count(DISTINCT _col0)
+                aggregations: sum(DISTINCT a), count(DISTINCT a)
                 bucketGroup: true
-                keys: _col0 (type: int)
+                keys: a (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
@@ -93,11 +93,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cint (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: cint
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(DISTINCT _col0), count(DISTINCT _col0), avg(DISTINCT _col0), std(DISTINCT _col0)
-                keys: _col0 (type: int)
+                aggregations: sum(DISTINCT cint), count(DISTINCT cint), avg(DISTINCT cint), std(DISTINCT cint)
+                keys: cint (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
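
The second hunk in this file exercises several DISTINCT aggregates over one integer column. A sketch of that query shape follows, for orientation only: the column name and aggregate functions are taken from the plan above, while the table name and everything else are assumptions rather than a copy of vectorized_distinct_gby.q.

    -- Hypothetical shape of the second query in vectorized_distinct_gby.q;
    -- not the actual test file contents.
    select sum(distinct cint),
           count(distinct cint),
           avg(distinct cint),
           std(distinct cint)
    from alltypesorc;

The change mirrors the rest of this commit: the hash-mode Group By now aggregates and keys on cint directly instead of on the Select Operator's internal alias _col0.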

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
index 82e90da..6a9532e 100644
--- a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
@@ -15,14 +15,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:v1 
+        $hdt$_0:$hdt$_0:v1 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_1:v1 
+        $hdt$_1:v1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:v1 
+        $hdt$_0:$hdt$_0:v1 
           TableScan
             alias: v1
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -37,7 +37,7 @@ STAGE PLANS:
                   keys:
                     0 _col0 (type: tinyint)
                     1 _col0 (type: tinyint)
-        $hdt$_0:$hdt$_1:v1 
+        $hdt$_1:v1 
           TableScan
             alias: v1
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -86,19 +86,15 @@ STAGE PLANS:
                         1 _col0 (type: smallint)
                       outputColumnNames: _col1
                       Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
-                      Select Operator
-                        expressions: _col1 (type: double)
+                      Group By Operator
+                        aggregations: sum(_col1)
+                        mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
-                        Group By Operator
-                          aggregations: sum(_col0)
-                          mode: hash
-                          outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
                           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                          Reduce Output Operator
-                            sort order: 
-                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                            value expressions: _col0 (type: double)
+                          value expressions: _col0 (type: double)
       Local Work:
         Map Reduce Local Work
       Execution mode: vectorized

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorized_parquet.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_parquet.q.out b/ql/src/test/results/clientpositive/vectorized_parquet.q.out
index 7b2dc6d..50e210c 100644
--- a/ql/src/test/results/clientpositive/vectorized_parquet.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_parquet.q.out
@@ -146,11 +146,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+              outputColumnNames: ctinyint, cint, csmallint, cstring1, cfloat, cdouble
               Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1), min(_col2), count(_col3), avg(_col4), stddev_pop(_col5)
-                keys: _col0 (type: tinyint)
+                aggregations: max(cint), min(csmallint), count(cstring1), avg(cfloat), stddev_pop(cdouble)
+                keys: ctinyint (type: tinyint)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out b/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
index 8d1bddc..a3bd8aa 100644
--- a/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_parquet_types.q.out
@@ -291,11 +291,11 @@ STAGE PLANS:
             Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double), cdecimal (type: decimal(4,2))
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+              outputColumnNames: ctinyint, cint, csmallint, cstring1, cfloat, cdouble, cdecimal
               Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1), min(_col2), count(_col3), avg(_col4), stddev_pop(_col5), max(_col6)
-                keys: _col0 (type: tinyint)
+                aggregations: max(cint), min(csmallint), count(cstring1), avg(cfloat), stddev_pop(cdouble), max(cdecimal)
+                keys: ctinyint (type: tinyint)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                 Statistics: Num rows: 22 Data size: 242 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
index f66903b..830eb92 100644
--- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out
@@ -611,10 +611,10 @@ STAGE PLANS:
             Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctimestamp1 (type: timestamp)
-              outputColumnNames: _col0
+              outputColumnNames: ctimestamp1
               Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col0), max(_col0), count(_col0), count()
+                aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count()
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
@@ -685,10 +685,10 @@ STAGE PLANS:
             Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctimestamp1 (type: timestamp)
-              outputColumnNames: _col0
+              outputColumnNames: ctimestamp1
               Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(ctimestamp1)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -768,10 +768,10 @@ STAGE PLANS:
             Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctimestamp1 (type: timestamp)
-              outputColumnNames: _col0
+              outputColumnNames: ctimestamp1
               Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col0), variance(_col0), var_pop(_col0), var_samp(_col0), std(_col0), stddev(_col0), stddev_pop(_col0), stddev_samp(_col0)
+                aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE


[32/41] hive git commit: HIVE-11838: Another positive test case for HIVE-11658 (Prasanth Jayachandran reviewed by Jason Dere)

Posted by se...@apache.org.
HIVE-11838: Another positive test case for HIVE-11658 (Prasanth Jayachandran reviewed by Jason Dere)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a12e5f5b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a12e5f5b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a12e5f5b

Branch: refs/heads/llap
Commit: a12e5f5bbc554b2b49d6aa726721aaba9c299409
Parents: 7201c26
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu Sep 17 13:36:25 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu Sep 17 13:36:25 2015 -0500

----------------------------------------------------------------------
 .../test/queries/clientpositive/load_orc_part.q |  5 ++++
 .../results/clientpositive/load_orc_part.q.out  | 26 ++++++++++++++++++++
 2 files changed, 31 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a12e5f5b/ql/src/test/queries/clientpositive/load_orc_part.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/load_orc_part.q b/ql/src/test/queries/clientpositive/load_orc_part.q
index 2ff884d..2902c72 100644
--- a/ql/src/test/queries/clientpositive/load_orc_part.q
+++ b/ql/src/test/queries/clientpositive/load_orc_part.q
@@ -17,3 +17,8 @@ alter table orc_test add partition(ds='11');
 alter table orc_test partition(ds='11') set fileformat textfile;
 load data local inpath '../../data/files/kv1.txt' into table orc_test partition(ds='11');
 dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=11/;
+
+alter table orc_test add partition(ds='12');
+alter table orc_test partition(ds='12') set fileformat textfile;
+load data local inpath '../../data/files/types/primitives' into table orc_test partition(ds='12');
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/orc_test/ds=12/;

http://git-wip-us.apache.org/repos/asf/hive/blob/a12e5f5b/ql/src/test/results/clientpositive/load_orc_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/load_orc_part.q.out b/ql/src/test/results/clientpositive/load_orc_part.q.out
index 2e02c2e..16346cd 100644
--- a/ql/src/test/results/clientpositive/load_orc_part.q.out
+++ b/ql/src/test/results/clientpositive/load_orc_part.q.out
@@ -86,3 +86,29 @@ POSTHOOK: type: LOAD
 POSTHOOK: Output: default@orc_test@ds=11
 Found 1 items
 #### A masked pattern was here ####
+PREHOOK: query: alter table orc_test add partition(ds='12')
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@orc_test
+POSTHOOK: query: alter table orc_test add partition(ds='12')
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@orc_test
+POSTHOOK: Output: default@orc_test@ds=12
+PREHOOK: query: alter table orc_test partition(ds='12') set fileformat textfile
+PREHOOK: type: ALTERPARTITION_FILEFORMAT
+PREHOOK: Input: default@orc_test
+PREHOOK: Output: default@orc_test@ds=12
+POSTHOOK: query: alter table orc_test partition(ds='12') set fileformat textfile
+POSTHOOK: type: ALTERPARTITION_FILEFORMAT
+POSTHOOK: Input: default@orc_test
+POSTHOOK: Input: default@orc_test@ds=12
+POSTHOOK: Output: default@orc_test@ds=12
+PREHOOK: query: load data local inpath '../../data/files/types/primitives' into table orc_test partition(ds='12')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@orc_test@ds=12
+POSTHOOK: query: load data local inpath '../../data/files/types/primitives' into table orc_test partition(ds='12')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@orc_test@ds=12
+Found 4 items
+#### A masked pattern was here ####


[40/41] hive git commit: HIVE-11886 : LLAP: merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
index ada310c,6aa39f1..ab2d856
--- a/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
@@@ -153,13 -152,18 +153,17 @@@ STAGE PLANS
                  mode: mergepartial
                  outputColumnNames: _col0, _col1, _col2
                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
+                   outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                   table:
-                       input format: org.apache.hadoop.mapred.TextInputFormat
-                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 -            Execution mode: vectorized
  
    Stage: Stage-0
      Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index 01f8f65,d33b0ed..d2937a5
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@@ -109,22 -109,17 +109,18 @@@ STAGE PLANS
                    Filter Operator
                      predicate: l_partkey is not null (type: boolean)
                      Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: l_partkey (type: int)
+                     Group By Operator
+                       keys: l_partkey (type: int)
+                       mode: hash
                        outputColumnNames: _col0
                        Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: int)
-                         mode: hash
-                         outputColumnNames: _col0
+                       Reduce Output Operator
+                         key expressions: _col0 (type: int)
+                         sort order: +
+                         Map-reduce partition columns: _col0 (type: int)
                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: int)
-                           sort order: +
-                           Map-reduce partition columns: _col0 (type: int)
-                           Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
          Reducer 4 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
                  keys: KEY._col0 (type: int)
@@@ -252,22 -248,17 +248,18 @@@ STAGE PLANS
                    Filter Operator
                      predicate: l_partkey is not null (type: boolean)
                      Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: l_partkey (type: int)
+                     Group By Operator
+                       keys: l_partkey (type: int)
+                       mode: hash
                        outputColumnNames: _col0
                        Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: int)
-                         mode: hash
-                         outputColumnNames: _col0
+                       Reduce Output Operator
+                         key expressions: _col0 (type: int)
+                         sort order: +
+                         Map-reduce partition columns: _col0 (type: int)
                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: int)
-                           sort order: +
-                           Map-reduce partition columns: _col0 (type: int)
-                           Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
          Reducer 4 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
                  keys: KEY._col0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index d14e1b7,14aa777..1cd5959
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@@ -44,25 -44,20 +44,21 @@@ STAGE PLANS
                    Filter Operator
                      predicate: (cdecimal1 is not null and cdecimal2 is not null) (type: boolean)
                      Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                       outputColumnNames: _col0, _col1, _col2, _col3
+                     Group By Operator
+                       aggregations: min(cdecimal1)
+                       keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
+                       mode: hash
+                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                        Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         aggregations: min(_col2)
-                         keys: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                         mode: hash
-                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                       Reduce Output Operator
+                         key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
+                         sort order: ++++
+                         Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                          Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                           sort order: ++++
-                           Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                           Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                           value expressions: _col4 (type: decimal(20,10))
+                         value expressions: _col4 (type: decimal(20,10))
              Execution mode: vectorized
          Reducer 2 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
                  aggregations: min(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/vector_outer_join2.q.out
index 3058423,236fa18..ce855ef
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@@ -251,22 -251,18 +251,18 @@@ STAGE PLANS
                      1 _col0 (type: bigint)
                    outputColumnNames: _col1
                    Statistics: Num rows: 24 Data size: 5060 Basic stats: COMPLETE Column stats: NONE
-                   Select Operator
-                     expressions: _col1 (type: bigint)
-                     outputColumnNames: _col0
-                     Statistics: Num rows: 24 Data size: 5060 Basic stats: COMPLETE Column stats: NONE
-                     Group By Operator
-                       aggregations: count(), sum(_col0)
-                       mode: hash
-                       outputColumnNames: _col0, _col1
+                   Group By Operator
+                     aggregations: count(), sum(_col1)
+                     mode: hash
+                     outputColumnNames: _col0, _col1
+                     Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                     Reduce Output Operator
+                       sort order: 
                        Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                       Reduce Output Operator
-                         sort order: 
-                         Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                         value expressions: _col0 (type: bigint), _col1 (type: bigint)
+                       value expressions: _col0 (type: bigint), _col1 (type: bigint)
 +      Execution mode: vectorized
        Local Work:
          Map Reduce Local Work
 -      Execution mode: vectorized
        Reduce Operator Tree:
          Group By Operator
            aggregations: count(VALUE._col0), sum(VALUE._col1)

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vector_outer_join5.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
index 85b8cc0,6a9532e..75acec9
--- a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out
@@@ -86,22 -86,18 +86,18 @@@ STAGE PLANS
                          1 _col0 (type: smallint)
                        outputColumnNames: _col1
                        Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
-                       Select Operator
-                         expressions: _col1 (type: double)
+                       Group By Operator
+                         aggregations: sum(_col1)
+                         mode: hash
                          outputColumnNames: _col0
-                         Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
-                         Group By Operator
-                           aggregations: sum(_col0)
-                           mode: hash
-                           outputColumnNames: _col0
+                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                         Reduce Output Operator
+                           sort order: 
                            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                           Reduce Output Operator
-                             sort order: 
-                             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                             value expressions: _col0 (type: double)
+                           value expressions: _col0 (type: double)
 +      Execution mode: vectorized
        Local Work:
          Map Reduce Local Work
 -      Execution mode: vectorized
        Reduce Operator Tree:
          Group By Operator
            aggregations: sum(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
----------------------------------------------------------------------
diff --cc serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
index c9807bf,cbad3b2..77e6de0
--- a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
@@@ -19,8 -19,11 +19,10 @@@
  package org.apache.hadoop.hive.serde2;
  
  import java.util.ArrayList;
 -import java.util.Arrays;
  import java.util.List;
  
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.util.StringUtils;
  


[09/41] hive git commit: HIVE-11824: Insert to local directory causes staging directory to be copied (Prasanth Jayachandran reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11824: Insert to local directory causes staging directory to be copied (Prasanth Jayachandran reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/03f46b2f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/03f46b2f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/03f46b2f

Branch: refs/heads/llap
Commit: 03f46b2f2eb0235e9a2079079e6d643e8cf7c89e
Parents: ba0b33c
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Wed Sep 16 01:08:02 2015 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Wed Sep 16 01:08:02 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/MoveTask.java    | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/03f46b2f/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index a1f8973..6a19cc3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -127,10 +127,22 @@ public class MoveTask extends Task<MoveWork> implements Serializable {
       LocalFileSystem dstFs = FileSystem.getLocal(conf);
 
       if (dstFs.delete(targetPath, true) || !dstFs.exists(targetPath)) {
-        console.printInfo(mesg, mesg_detail);
         // if source exists, rename. Otherwise, create a empty directory
         if (fs.exists(sourcePath)) {
-          fs.copyToLocalFile(sourcePath, targetPath);
+          try {
+            // create the destination if it does not exist
+            if (!dstFs.exists(targetPath)) {
+              FileUtils.mkdir(dstFs, targetPath, false, conf);
+            }
+          } catch (IOException e) {
+            throw new HiveException("Unable to create target directory for copy" + targetPath, e);
+          }
+
+          FileSystem srcFs = sourcePath.getFileSystem(conf);
+          FileStatus[] srcs = srcFs.listStatus(sourcePath, FileUtils.HIDDEN_FILES_PATH_FILTER);
+          for (FileStatus status : srcs) {
+            fs.copyToLocalFile(status.getPath(), targetPath);
+          }
         } else {
           if (!dstFs.mkdirs(targetPath)) {
             throw new HiveException("Unable to make local directory: "


[33/41] hive git commit: HIVE-11815 : Correct the column/table names in subquery expression when creating a view (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11815 : Correct the column/table names in subquery expression when creating a view (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8da2ed30
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8da2ed30
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8da2ed30

Branch: refs/heads/llap
Commit: 8da2ed304891dc8483fe3d78eda4c9f70c54ae18
Parents: a12e5f5
Author: Pengcheng Xiong <px...@apache.org>
Authored: Thu Sep 17 13:20:00 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Thu Sep 17 13:20:00 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/parse/QBSubQuery.java |   7 --
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |  11 --
 .../queries/clientpositive/subquery_views.q     |  22 +++-
 .../subquery_exists_implicit_gby.q.out          |   8 +-
 .../subquery_nested_subquery.q.out              |   4 +-
 .../subquery_notexists_implicit_gby.q.out       |   8 +-
 .../subquery_windowing_corr.q.out               |   7 +-
 .../results/clientpositive/subquery_views.q.out | 116 +++++++++++++++++++
 8 files changed, 141 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
index 92cbabc..f95ee8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBSubQuery.java
@@ -401,7 +401,6 @@ public class QBSubQuery implements ISubQueryJoinInfo {
           CNT_ALIAS,
           subQryCorrExprs,
           sqRR);
-      SubQueryUtils.setOriginDeep(ast, QBSubQuery.this.originalSQASTOrigin);
       return ast;
     }
 
@@ -416,7 +415,6 @@ public class QBSubQuery implements ISubQueryJoinInfo {
     public ASTNode getJoinConditionAST() {
       ASTNode ast =
           SubQueryUtils.buildNotInNullJoinCond(getAlias(), CNT_ALIAS);
-      SubQueryUtils.setOriginDeep(ast, QBSubQuery.this.originalSQASTOrigin);
       return ast;
     }
 
@@ -576,8 +574,6 @@ public class QBSubQuery implements ISubQueryJoinInfo {
 
     rewrite(outerQueryRR, forHavingClause, outerQueryAlias, insertClause, selectClause);
 
-    SubQueryUtils.setOriginDeep(subQueryAST, originalSQASTOrigin);
-
     /*
      * Restriction.13.m :: In the case of an implied Group By on a
      * correlated SubQuery, the SubQuery always returns 1 row.
@@ -696,8 +692,6 @@ public class QBSubQuery implements ISubQueryJoinInfo {
       }
     }
 
-    SubQueryUtils.setOriginDeep(joinConditionAST, originalSQASTOrigin);
-    SubQueryUtils.setOriginDeep(postJoinConditionAST, originalSQASTOrigin);
   }
 
   ASTNode updateOuterQueryFilter(ASTNode outerQryFilter) {
@@ -711,7 +705,6 @@ public class QBSubQuery implements ISubQueryJoinInfo {
       return postJoinConditionAST;
     }
     ASTNode node = SubQueryUtils.andAST(outerQryFilter, postJoinConditionAST);
-    node.setOrigin(originalSQASTOrigin);
     return node;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
index 87a7ced..362a285 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
@@ -467,17 +467,6 @@ public class SubQueryUtils {
     return check;
   }
 
-  static void setOriginDeep(ASTNode node, ASTNodeOrigin origin) {
-    if ( node == null ) {
-      return;
-    }
-    node.setOrigin(origin);
-    int childCnt = node.getChildCount();
-    for(int i=0; i<childCnt; i++) {
-      setOriginDeep((ASTNode)node.getChild(i), origin);
-    }
-  }
-
   /*
    * Set of functions to create the Null Check Query for Not-In SubQuery predicates.
    * For a SubQuery predicate like:

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/queries/clientpositive/subquery_views.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/subquery_views.q b/ql/src/test/queries/clientpositive/subquery_views.q
index f15d41b..e646310 100644
--- a/ql/src/test/queries/clientpositive/subquery_views.q
+++ b/ql/src/test/queries/clientpositive/subquery_views.q
@@ -10,6 +10,8 @@ where exists
   where b.value = a.value  and a.key = b.key and a.value > 'val_9')
 ;
 
+describe extended cv1;
+
 select * 
 from cv1 where cv1.key in (select key from cv1 c where c.key > '95');
 ;
@@ -26,6 +28,8 @@ where b.key not in
   )
 ;
 
+describe extended cv2;
+
 explain
 select * 
 from cv2 where cv2.key in (select key from cv2 c where c.key < '11');
@@ -44,10 +48,26 @@ group by key, value
 having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key )
 ;
 
+describe extended cv3;
+
 select * from cv3;
 
 
 -- join of subquery views
 select *
 from cv3
-where cv3.key in (select key from cv1);
\ No newline at end of file
+where cv3.key in (select key from cv1);
+
+drop table tc;
+
+create table tc (`@d` int);
+
+insert overwrite table tc select 1 from src limit 1;
+
+drop view tcv;
+
+create view tcv as select * from tc b where exists (select a.`@d` from tc a where b.`@d`=a.`@d`);
+
+describe extended tcv;
+
+select * from tcv;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out b/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
index 4830c00..f7251e3 100644
--- a/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
+++ b/ql/src/test/results/clientnegative/subquery_exists_implicit_gby.q.out
@@ -1,7 +1 @@
-FAILED: SemanticException Line 7:7 Invalid SubQuery expression 'key' in definition of SubQuery sq_1 [
-exists 
-  (select count(*) 
-  from src a 
-  where b.value = a.value  and a.key = b.key and a.value > 'val_9'
-  )
-] used as sq_1 at Line 5:6: An Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return true).
+FAILED: SemanticException [Error 10250]: Line 7:7 Invalid SubQuery expression 'key': An Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return true).

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/results/clientnegative/subquery_nested_subquery.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_nested_subquery.q.out b/ql/src/test/results/clientnegative/subquery_nested_subquery.q.out
index ae3bc8f..140b093 100644
--- a/ql/src/test/results/clientnegative/subquery_nested_subquery.q.out
+++ b/ql/src/test/results/clientnegative/subquery_nested_subquery.q.out
@@ -1,3 +1 @@
-FAILED: SemanticException Line 3:53 Unsupported SubQuery Expression 'p_name' in definition of SubQuery sq_1 [
-x.p_name in (select y.p_name from part y where exists (select z.p_name from part z where y.p_name = z.p_name))
-] used as sq_1 at Line 3:15: Nested SubQuery expressions are not supported.
+FAILED: SemanticException [Error 10249]: Line 3:53 Unsupported SubQuery Expression 'p_name': Nested SubQuery expressions are not supported.

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out b/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
index 74422af..6d9fa0a 100644
--- a/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
+++ b/ql/src/test/results/clientnegative/subquery_notexists_implicit_gby.q.out
@@ -1,7 +1 @@
-FAILED: SemanticException Line 7:7 Invalid SubQuery expression 'key' in definition of SubQuery sq_1 [
-exists 
-  (select sum(1)
-  from src a 
-  where b.value = a.value  and a.key = b.key and a.value > 'val_9'
-  )
-] used as sq_1 at Line 5:10: A Not Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return false).
+FAILED: SemanticException [Error 10250]: Line 7:7 Invalid SubQuery expression 'key': A Not Exists predicate on SubQuery with implicit Aggregation(no Group By clause) cannot be rewritten. (predicate will always return false).

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/results/clientnegative/subquery_windowing_corr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/subquery_windowing_corr.q.out b/ql/src/test/results/clientnegative/subquery_windowing_corr.q.out
index 647a535..dcd3026 100644
--- a/ql/src/test/results/clientnegative/subquery_windowing_corr.q.out
+++ b/ql/src/test/results/clientnegative/subquery_windowing_corr.q.out
@@ -1,6 +1 @@
-FAILED: SemanticException Line 6:8 Unsupported SubQuery Expression '1' in definition of SubQuery sq_1 [
-a.p_size in 
-  (select first_value(p_size) over(partition by p_mfgr order by p_size) 
-   from part b 
-   where a.p_brand = b.p_brand)
-] used as sq_1 at Line 4:15: Correlated Sub Queries cannot contain Windowing clauses.
+FAILED: SemanticException [Error 10249]: Line 6:8 Unsupported SubQuery Expression '1': Correlated Sub Queries cannot contain Windowing clauses.

http://git-wip-us.apache.org/repos/asf/hive/blob/8da2ed30/ql/src/test/results/clientpositive/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out
index cfa7339..470fa83 100644
--- a/ql/src/test/results/clientpositive/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/subquery_views.q.out
@@ -26,6 +26,26 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cv1
+PREHOOK: query: describe extended cv1
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv1
+POSTHOOK: query: describe extended cv1
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv1
+key                 	string              	                    
+value               	string              	                    
+	 	 
+#### A masked pattern was here ####
+from src b 	 	 
+where exists	 	 
+  (select a.key 	 	 
+  from src a 	 	 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_9'), viewExpandedText:select `b`.`key`, `b`.`value` 	 	 
+from `default`.`src` `b` 	 	 
+where exists	 	 
+  (select `a`.`key` 	 	 
+  from `default`.`src` `a` 	 	 
+  where `b`.`value` = `a`.`value`  and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), tableType:VIRTUAL_VIEW)		 
 PREHOOK: query: select * 
 from cv1 where cv1.key in (select key from cv1 c where c.key > '95')
 PREHOOK: type: QUERY
@@ -69,6 +89,28 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cv2
+PREHOOK: query: describe extended cv2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv2
+POSTHOOK: query: describe extended cv2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv2
+key                 	string              	                    
+value               	string              	                    
+	 	 
+#### A masked pattern was here ####
+from src b 	 	 
+where b.key not in	 	 
+  (select a.key 	 	 
+  from src a 	 	 
+  where b.value = a.value  and a.key = b.key and a.value > 'val_11'	 	 
+  ), viewExpandedText:select `b`.`key`, `b`.`value` 	 	 
+from `default`.`src` `b` 	 	 
+where `b`.`key` not in	 	 
+  (select `a`.`key` 	 	 
+  from `default`.`src` `a` 	 	 
+  where `b`.`value` = `a`.`value`  and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11'	 	 
+  ), tableType:VIRTUAL_VIEW)		 
 Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
 PREHOOK: query: explain
@@ -425,6 +467,25 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cv3
+PREHOOK: query: describe extended cv3
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@cv3
+POSTHOOK: query: describe extended cv3
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@cv3
+key                 	string              	                    
+value               	string              	                    
+_c2                 	bigint              	                    
+	 	 
+#### A masked pattern was here ####
+from src b	 	 
+where b.key in (select key from src where src.key > '8')	 	 
+group by key, value	 	 
+having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ), viewExpandedText:select `b`.`key`, `b`.`value`, count(*) 	 	 
+from `default`.`src` `b`	 	 
+where `b`.`key` in (select `src`.`key` from `default`.`src` where `src`.`key` > '8')	 	 
+group by `b`.`key`, `b`.`value`	 	 
+having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), tableType:VIRTUAL_VIEW)		 
 PREHOOK: query: select * from cv3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cv3
@@ -473,3 +534,58 @@ POSTHOOK: Input: default@src
 96	val_96	1
 97	val_97	2
 98	val_98	2
+PREHOOK: query: drop table tc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table tc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table tc (`@d` int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tc
+POSTHOOK: query: create table tc (`@d` int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tc
+PREHOOK: query: insert overwrite table tc select 1 from src limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@tc
+POSTHOOK: query: insert overwrite table tc select 1 from src limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@tc
+POSTHOOK: Lineage: tc.@d SIMPLE []
+PREHOOK: query: drop view tcv
+PREHOOK: type: DROPVIEW
+POSTHOOK: query: drop view tcv
+POSTHOOK: type: DROPVIEW
+PREHOOK: query: create view tcv as select * from tc b where exists (select a.`@d` from tc a where b.`@d`=a.`@d`)
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@tc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tcv
+POSTHOOK: query: create view tcv as select * from tc b where exists (select a.`@d` from tc a where b.`@d`=a.`@d`)
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@tc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@tcv
+PREHOOK: query: describe extended tcv
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@tcv
+POSTHOOK: query: describe extended tcv
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@tcv
+@d                  	int                 	                    
+	 	 
+#### A masked pattern was here ####
+PREHOOK: query: select * from tcv
+PREHOOK: type: QUERY
+PREHOOK: Input: default@tc
+PREHOOK: Input: default@tcv
+#### A masked pattern was here ####
+POSTHOOK: query: select * from tcv
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@tc
+POSTHOOK: Input: default@tcv
+#### A masked pattern was here ####
+1


[34/41] hive git commit: HIVE-11706 : Implement show create database (Navis via Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11706 : Implement show create database (Navis via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3cf7bd9e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3cf7bd9e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3cf7bd9e

Branch: refs/heads/llap
Commit: 3cf7bd9e871e7f258d764b2d988cabfb356b6c71
Parents: 8da2ed3
Author: Navis Ryu <na...@apache.org>
Authored: Tue Sep 8 22:01:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Thu Sep 17 13:35:17 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 269 ++++++++-----------
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  17 ++
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |   7 +-
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |   2 +
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java |  21 ++
 .../hadoop/hive/ql/plan/HiveOperation.java      |   1 +
 .../hive/ql/plan/ShowCreateDatabaseDesc.java    |  94 +++++++
 .../authorization/plugin/HiveOperationType.java |   1 +
 .../plugin/sqlstd/Operation2Privilege.java      |   2 +
 .../clientpositive/show_create_database.q       |   3 +
 .../clientpositive/show_create_database.q.out   |  19 ++
 .../tez/show_create_database.q.out              |  19 ++
 12 files changed, 296 insertions(+), 159 deletions(-)
----------------------------------------------------------------------
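
For readers skimming the patch, the new statement mirrors the existing SHOW CREATE TABLE. A minimal illustrative session follows; the database name, comment, location and property are made up, and the printed shape follows the showCreateDatabase() method added in the DDLTask.java hunk further down:

  SHOW CREATE DATABASE testdb;
  -- expected shape of the output:
  -- CREATE DATABASE `testdb`
  -- COMMENT
  --   'example database'
  -- LOCATION
  --   'hdfs://namenode:8020/user/hive/warehouse/testdb.db'
  -- WITH DBPROPERTIES (
  --   'owner'='tester')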


http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 734742c..210736b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
+import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
@@ -440,6 +441,11 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         return showPartitions(db, showParts);
       }
 
+      ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
+      if (showCreateDb != null) {
+        return showCreateDatabase(db, showCreateDb);
+      }
+
       ShowCreateTableDesc showCreateTbl = work.getShowCreateTblDesc();
       if (showCreateTbl != null) {
         return showCreateTable(db, showCreateTbl);
@@ -545,9 +551,23 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     return 0;
   }
 
-  private DataOutputStream getOutputStream(Path outputFile) throws Exception {
-    FileSystem fs = outputFile.getFileSystem(conf);
-    return fs.create(outputFile);
+  private DataOutputStream getOutputStream(String resFile) throws HiveException {
+    try {
+      return getOutputStream(new Path(resFile));
+    } catch (HiveException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+  }
+
+  private DataOutputStream getOutputStream(Path outputFile) throws HiveException {
+    try {
+      FileSystem fs = outputFile.getFileSystem(conf);
+      return fs.create(outputFile);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
   }
 
   /**
@@ -1891,16 +1911,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showParts.getResFile());
     try {
-      Path resFile = new Path(showParts.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       formatter.showTablePartitions(outStream, parts);
-
-      outStream.close();
-      outStream = null;
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show partitions for table " + tabName);
     } finally {
@@ -1918,6 +1931,40 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       "NULL DEFINED AS"
   };
 
+  private int showCreateDatabase(Hive db, ShowCreateDatabaseDesc showCreateDb) throws HiveException {
+    DataOutputStream outStream = getOutputStream(showCreateDb.getResFile());
+    try {
+      String dbName = showCreateDb.getDatabaseName();
+      return showCreateDatabase(db, outStream, dbName);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+  }
+
+  private int showCreateDatabase(Hive db, DataOutputStream outStream, String databaseName)
+      throws Exception {
+    Database database = db.getDatabase(databaseName);
+
+    StringBuilder createDb_str = new StringBuilder();
+    createDb_str.append("CREATE DATABASE `").append(database.getName()).append("`\n");
+    if (database.getDescription() != null) {
+      createDb_str.append("COMMENT\n  '");
+      createDb_str.append(escapeHiveCommand(database.getDescription())).append("'\n");
+    }
+    createDb_str.append("LOCATION\n  '");
+    createDb_str.append(database.getLocationUri()).append("'\n");
+    String propertiesToString = propertiesToString(database.getParameters(), null);
+    if (!propertiesToString.isEmpty()) {
+      createDb_str.append("WITH DBPROPERTIES (\n");
+      createDb_str.append(propertiesToString).append(")\n");
+    }
+
+    outStream.write(createDb_str.toString().getBytes("UTF-8"));
+    return 0;
+  }
+
   /**
    * Write a statement of how to create a table to a file.
    *
@@ -1931,6 +1978,19 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
    */
   private int showCreateTable(Hive db, ShowCreateTableDesc showCreateTbl) throws HiveException {
     // get the create table statement for the table and populate the output
+    DataOutputStream outStream = getOutputStream(showCreateTbl.getResFile());
+    try {
+      String tableName = showCreateTbl.getTableName();
+      return showCreateTable(db, outStream, tableName);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    } finally {
+      IOUtils.closeStream(outStream);
+    }
+  }
+
+  private int showCreateTable(Hive db, DataOutputStream outStream, String tableName)
+      throws HiveException {
     final String EXTERNAL = "external";
     final String TEMPORARY = "temporary";
     final String LIST_COLUMNS = "columns";
@@ -1943,22 +2003,14 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     boolean needsLocation = true;
     StringBuilder createTab_str = new StringBuilder();
 
-    String tableName = showCreateTbl.getTableName();
     Table tbl = db.getTable(tableName, false);
-    DataOutputStream outStream = null;
     List<String> duplicateProps = new ArrayList<String>();
     try {
-      Path resFile = new Path(showCreateTbl.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       needsLocation = doesTableNeedLocation(tbl);
 
       if (tbl.isView()) {
         String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + tbl.getViewExpandedText();
         outStream.writeBytes(createTab_stmt.toString());
-        outStream.close();
-        outStream = null;
         return 0;
       }
 
@@ -2115,18 +2167,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       String tbl_location = "  '" + escapeHiveCommand(sd.getLocation()) + "'";
 
       // Table properties
-      String tbl_properties = "";
-      if (!tbl.getParameters().isEmpty()) {
-        Map<String, String> properties = new TreeMap<String, String>(tbl.getParameters());
-        List<String> realProps = new ArrayList<String>();
-        for (String key : properties.keySet()) {
-          if (properties.get(key) != null && !duplicateProps.contains(key)) {
-            realProps.add("  '" + key + "'='" +
-                escapeHiveCommand(StringEscapeUtils.escapeJava(properties.get(key))) + "'");
-          }
-        }
-        tbl_properties += StringUtils.join(realProps, ", \n");
-      }
+      String tbl_properties = propertiesToString(tbl.getParameters(), duplicateProps);
 
       createTab_stmt.add(TEMPORARY, tbl_temp);
       createTab_stmt.add(EXTERNAL, tbl_external);
@@ -2142,23 +2183,30 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       createTab_stmt.add(TBL_PROPERTIES, tbl_properties);
 
       outStream.writeBytes(createTab_stmt.render());
-      outStream.close();
-      outStream = null;
-    } catch (FileNotFoundException e) {
-      LOG.info("show create table: " + stringifyException(e));
-      return 1;
     } catch (IOException e) {
       LOG.info("show create table: " + stringifyException(e));
       return 1;
-    } catch (Exception e) {
-      throw new HiveException(e);
-    } finally {
-      IOUtils.closeStream(outStream);
     }
 
     return 0;
   }
 
+  private String propertiesToString(Map<String, String> props, List<String> exclude) {
+    String prop_string = "";
+    if (!props.isEmpty()) {
+      Map<String, String> properties = new TreeMap<String, String>(props);
+      List<String> realProps = new ArrayList<String>();
+      for (String key : properties.keySet()) {
+        if (properties.get(key) != null && (exclude == null || !exclude.contains(key))) {
+          realProps.add("  '" + key + "'='" +
+              escapeHiveCommand(StringEscapeUtils.escapeJava(properties.get(key))) + "'");
+        }
+      }
+      prop_string += StringUtils.join(realProps, ", \n");
+    }
+    return prop_string;
+  }
+
   private boolean containsNonNull(String[] values) {
     for (String value : values) {
       if (value != null) {
@@ -2202,12 +2250,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     indexes = db.getIndexes(tbl.getDbName(), tbl.getTableName(), (short) -1);
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showIndexes.getResFile());
     try {
-      Path resFile = new Path(showIndexes.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       if (showIndexes.isFormatted()) {
         // column headers
         outStream.writeBytes(MetaDataFormatUtils.getIndexColumnsHeader());
@@ -2219,10 +2263,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       {
         outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(index));
       }
-
-      outStream.close();
-      outStream = null;
-
     } catch (FileNotFoundException e) {
       LOG.info("show indexes: " + stringifyException(e));
       throw new HiveException(e.toString());
@@ -2259,15 +2299,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     LOG.info("results : " + databases.size());
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showDatabasesDesc.getResFile());
     try {
-      Path resFile = new Path(showDatabasesDesc.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       formatter.showDatabases(outStream, databases);
-      outStream.close();
-      outStream = null;
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show databases");
     } finally {
@@ -2304,16 +2338,10 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showTbls.getResFile());
     try {
-      Path resFile = new Path(showTbls.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
       formatter.showTables(outStream, sortedTbls);
-      outStream.close();
-      outStream = null;
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database" + dbName);
     } finally {
@@ -2328,12 +2356,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     Table table = db.getTable(showCols.getTableName());
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showCols.getResFile());
     try {
-      Path resFile = new Path(showCols.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       List<FieldSchema> cols = table.getCols();
       cols.addAll(table.getPartCols());
       // In case the query is served by HiveServer2, don't pad it with spaces,
@@ -2341,8 +2365,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       boolean isOutputPadded = !SessionState.get().isHiveServerQuery();
       outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(
           cols, false, isOutputPadded, null));
-      outStream.close();
-      outStream = null;
     } catch (IOException e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
     } finally {
@@ -2377,11 +2399,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showFuncs.getResFile());
     try {
-      Path resFile = new Path(showFuncs.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
       SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
       // To remove the primitive types
       sortedFuncs.removeAll(serdeConstants.PrimitiveTypes);
@@ -2392,8 +2411,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         outStream.writeBytes(iterFuncs.next());
         outStream.write(terminator);
       }
-      outStream.close();
-      outStream = null;
     } catch (FileNotFoundException e) {
       LOG.warn("show function: " + stringifyException(e));
       return 1;
@@ -2430,11 +2447,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showLocks.getResFile());
     try {
-      Path resFile = new Path(showLocks.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
       List<HiveLock> locks = null;
 
       if (showLocks.getTableName() == null) {
@@ -2490,8 +2504,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         }
         outStream.write(terminator);
       }
-      outStream.close();
-      outStream = null;
     } catch (FileNotFoundException e) {
       LOG.warn("show function: " + stringifyException(e));
       return 1;
@@ -2518,12 +2530,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     ShowLocksResponse rsp = lockMgr.getLocks();
 
     // write the results in the file
-    DataOutputStream os = null;
+    DataOutputStream os = getOutputStream(showLocks.getResFile());
     try {
-      Path resFile = new Path(showLocks.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      os = fs.create(resFile);
-
       // Write a header
       os.writeBytes("Lock ID");
       os.write(separator);
@@ -2577,9 +2585,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         }
 
       }
-
-      os.close();
-      os = null;
     } catch (FileNotFoundException e) {
       LOG.warn("show function: " + stringifyException(e));
       return 1;
@@ -2599,12 +2604,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     ShowCompactResponse rsp = db.showCompactions();
 
     // Write the results into the file
-    DataOutputStream os = null;
+    DataOutputStream os = getOutputStream(desc.getResFile());
     try {
-      Path resFile = new Path(desc.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      os = fs.create(resFile);
-
       // Write a header
       os.writeBytes("Database");
       os.write(separator);
@@ -2641,7 +2642,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           os.write(terminator);
         }
       }
-      os.close();
     } catch (IOException e) {
       LOG.warn("show compactions: " + stringifyException(e));
       return 1;
@@ -2656,12 +2656,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     GetOpenTxnsInfoResponse rsp = db.showTransactions();
 
     // Write the results into the file
-    DataOutputStream os = null;
+    DataOutputStream os = getOutputStream(desc.getResFile());
     try {
-      Path resFile = new Path(desc.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      os = fs.create(resFile);
-
       // Write a header
       os.writeBytes("Transaction ID");
       os.write(separator);
@@ -2682,7 +2678,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         os.writeBytes(txn.getHostname());
         os.write(terminator);
       }
-      os.close();
     } catch (IOException e) {
       LOG.warn("show transactions: " + stringifyException(e));
       return 1;
@@ -2763,12 +2758,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     String funcName = descFunc.getName();
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(descFunc.getResFile());
     try {
-      Path resFile = new Path(descFunc.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       // get the function documentation
       Description desc = null;
       Class<?> funcClass = null;
@@ -2801,9 +2792,6 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
 
       outStream.write(terminator);
-
-      outStream.close();
-      outStream = null;
     } catch (FileNotFoundException e) {
       LOG.warn("describe function: " + stringifyException(e));
       return 1;
@@ -2819,12 +2807,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   }
 
   private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException {
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(descDatabase.getResFile());
     try {
-      Path resFile = new Path(descDatabase.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       Database database = db.getDatabase(descDatabase.getDatabaseName());
 
       if (database == null) {
@@ -2851,9 +2835,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           database.getDescription(), location,
           database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params);
 
-      outStream.close();
-      outStream = null;
-    } catch (IOException e) {
+    } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
     } finally {
       IOUtils.closeStream(outStream);
@@ -2899,16 +2881,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
     }
 
     // write the results in the file
-    DataOutputStream outStream = null;
+    DataOutputStream outStream = getOutputStream(showTblStatus.getResFile());
     try {
-      Path resFile = new Path(showTblStatus.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
-
       formatter.showTableStatus(outStream, db, conf, tbls, part, par);
-
-      outStream.close();
-      outStream = null;
     } catch (Exception e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show table status");
     } finally {
@@ -3011,40 +2986,22 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
 
     // describe the table - populate the output stream
     Table tbl = db.getTable(tableName, false);
+    if (tbl == null) {
+      throw new HiveException(ErrorMsg.INVALID_TABLE, tableName);
+    }
     Partition part = null;
-    DataOutputStream outStream = null;
-    try {
-      Path resFile = new Path(descTbl.getResFile());
-      if (tbl == null) {
-        FileSystem fs = resFile.getFileSystem(conf);
-        outStream = fs.create(resFile);
-        outStream.close();
-        outStream = null;
-        throw new HiveException(ErrorMsg.INVALID_TABLE, tableName);
-      }
-      if (descTbl.getPartSpec() != null) {
-        part = db.getPartition(tbl, descTbl.getPartSpec(), false);
-        if (part == null) {
-          FileSystem fs = resFile.getFileSystem(conf);
-          outStream = fs.create(resFile);
-          outStream.close();
-          outStream = null;
-          throw new HiveException(ErrorMsg.INVALID_PARTITION,
-              StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName);
-        }
-        tbl = part.getTable();
+    if (descTbl.getPartSpec() != null) {
+      part = db.getPartition(tbl, descTbl.getPartSpec(), false);
+      if (part == null) {
+        throw new HiveException(ErrorMsg.INVALID_PARTITION,
+            StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName);
       }
-    } catch (IOException e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName);
-    } finally {
-      IOUtils.closeStream(outStream);
+      tbl = part.getTable();
     }
 
+    DataOutputStream outStream = getOutputStream(descTbl.getResFile());
     try {
       LOG.info("DDLTask: got data for " + tbl.getTableName());
-      Path resFile = new Path(descTbl.getResFile());
-      FileSystem fs = resFile.getFileSystem(conf);
-      outStream = fs.create(resFile);
 
       List<FieldSchema> cols = null;
       List<ColumnStatisticsObj> colStats = null;
@@ -3092,13 +3049,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
           descTbl.isPretty(), isOutputPadded, colStats);
 
       LOG.info("DDLTask: written data for " + tbl.getTableName());
-      outStream.close();
-      outStream = null;
 
     } catch (SQLException e) {
       throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName);
-    } catch (IOException e) {
-      throw new HiveException(e, ErrorMsg.GENERIC_ERROR, tableName);
     } finally {
       IOUtils.closeStream(outStream);
     }
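
The hunks above funnel the repeated resFile handling (build a Path, look up its FileSystem, create the stream) through a single private getOutputStream(String) helper on DDLTask. The helper's body is not shown in this excerpt; a minimal sketch of what it plausibly does, assuming it only wraps the Path/FileSystem calls it replaces and reports failures as HiveException, is:

    private DataOutputStream getOutputStream(String resFile) throws HiveException {
      try {
        Path resPath = new Path(resFile);
        FileSystem fs = resPath.getFileSystem(conf);
        // callers still close the stream themselves via IOUtils.closeStream in their finally blocks
        return fs.create(resPath);
      } catch (IOException e) {
        throw new HiveException(e);
      }
    }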

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 9f8c756..2d7d9d4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -106,6 +106,7 @@ import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
 import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCompactionsDesc;
 import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
+import org.apache.hadoop.hive.ql.plan.ShowCreateDatabaseDesc;
 import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
 import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
@@ -413,6 +414,10 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
       ctx.setResFile(ctx.getLocalTmpPath());
       analyzeShowPartitions(ast);
       break;
+    case HiveParser.TOK_SHOW_CREATEDATABASE:
+      ctx.setResFile(ctx.getLocalTmpPath());
+      analyzeShowCreateDatabase(ast);
+      break;
     case HiveParser.TOK_SHOW_CREATETABLE:
       ctx.setResFile(ctx.getLocalTmpPath());
       analyzeShowCreateTable(ast);
@@ -2078,6 +2083,18 @@ public class DDLSemanticAnalyzer extends BaseSemanticAnalyzer {
     setFetchTask(createFetchTask(showPartsDesc.getSchema()));
   }
 
+  private void analyzeShowCreateDatabase(ASTNode ast) throws SemanticException {
+    String dbName = getUnescapedName((ASTNode)ast.getChild(0));
+    ShowCreateDatabaseDesc showCreateDbDesc =
+        new ShowCreateDatabaseDesc(dbName, ctx.getResFile().toString());
+
+    Database database = getDatabase(dbName);
+    inputs.add(new ReadEntity(database));
+    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+        showCreateDbDesc), conf));
+    setFetchTask(createFetchTask(showCreateDbDesc.getSchema()));
+  }
+
   private void analyzeShowCreateTable(ASTNode ast) throws SemanticException {
     ShowCreateTableDesc showCreateTblDesc;
     String tableName = getUnescapedName((ASTNode)ast.getChild(0));
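
The analyzer wires the new statement to a fetch task built from ShowCreateDatabaseDesc.getSchema() ("createdb_stmt#string"), so the generated DDL comes back to the caller as a single string column named createdb_stmt. A hedged illustration of how a JDBC client would read that column (the java.sql Connection to HiveServer2 is assumed and is not part of this change):

    // connection is an assumed java.sql.Connection obtained from the Hive JDBC driver
    try (Statement stmt = connection.createStatement();
         ResultSet rs = stmt.executeQuery("SHOW CREATE DATABASE some_database")) {
      while (rs.next()) {
        System.out.println(rs.getString("createdb_stmt"));
      }
    }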

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 3969a54..3df67e9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -178,6 +178,7 @@ TOK_SHOWTABLES;
 TOK_SHOWCOLUMNS;
 TOK_SHOWFUNCTIONS;
 TOK_SHOWPARTITIONS;
+TOK_SHOW_CREATEDATABASE;
 TOK_SHOW_CREATETABLE;
 TOK_SHOW_TABLESTATUS;
 TOK_SHOW_TBLPROPERTIES;
@@ -1374,7 +1375,11 @@ showStatement
     -> ^(TOK_SHOWCOLUMNS tableName $db_name?)
     | KW_SHOW KW_FUNCTIONS (KW_LIKE showFunctionIdentifier|showFunctionIdentifier)?  -> ^(TOK_SHOWFUNCTIONS KW_LIKE? showFunctionIdentifier?)
     | KW_SHOW KW_PARTITIONS tabName=tableName partitionSpec? -> ^(TOK_SHOWPARTITIONS $tabName partitionSpec?) 
-    | KW_SHOW KW_CREATE KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName)
+    | KW_SHOW KW_CREATE (
+        (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) db_name=identifier -> ^(TOK_SHOW_CREATEDATABASE $db_name)
+        |
+        KW_TABLE tabName=tableName -> ^(TOK_SHOW_CREATETABLE $tabName)
+      )
     | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec?
     -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
     | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index a2fbc11..0affe84 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -65,6 +65,7 @@ public final class SemanticAnalyzerFactory {
     commandType.put(HiveParser.TOK_SHOWCOLUMNS, HiveOperation.SHOWCOLUMNS);
     commandType.put(HiveParser.TOK_SHOW_TABLESTATUS, HiveOperation.SHOW_TABLESTATUS);
     commandType.put(HiveParser.TOK_SHOW_TBLPROPERTIES, HiveOperation.SHOW_TBLPROPERTIES);
+    commandType.put(HiveParser.TOK_SHOW_CREATEDATABASE, HiveOperation.SHOW_CREATEDATABASE);
     commandType.put(HiveParser.TOK_SHOW_CREATETABLE, HiveOperation.SHOW_CREATETABLE);
     commandType.put(HiveParser.TOK_SHOWFUNCTIONS, HiveOperation.SHOWFUNCTIONS);
     commandType.put(HiveParser.TOK_SHOWINDEXES, HiveOperation.SHOWINDEXES);
@@ -227,6 +228,7 @@ public final class SemanticAnalyzerFactory {
       case HiveParser.TOK_SHOWCOLUMNS:
       case HiveParser.TOK_SHOW_TABLESTATUS:
       case HiveParser.TOK_SHOW_TBLPROPERTIES:
+      case HiveParser.TOK_SHOW_CREATEDATABASE:
       case HiveParser.TOK_SHOW_CREATETABLE:
       case HiveParser.TOK_SHOWFUNCTIONS:
       case HiveParser.TOK_SHOWPARTITIONS:

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
index 8dbb3c1..a4c3db1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
@@ -57,6 +57,7 @@ public class DDLWork implements Serializable {
   private ShowTxnsDesc showTxnsDesc;
   private DescFunctionDesc descFunctionDesc;
   private ShowPartitionsDesc showPartsDesc;
+  private ShowCreateDatabaseDesc showCreateDbDesc;
   private ShowCreateTableDesc showCreateTblDesc;
   private DescTableDesc descTblDesc;
   private AddPartitionDesc addPartitionDesc;
@@ -367,6 +368,16 @@ public class DDLWork implements Serializable {
   }
 
   /**
+   * @param showCreateDbDesc
+   */
+  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
+      ShowCreateDatabaseDesc showCreateDbDesc) {
+    this(inputs, outputs);
+
+    this.showCreateDbDesc = showCreateDbDesc;
+  }
+
+  /**
    * @param showCreateTblDesc
    */
   public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
@@ -832,6 +843,16 @@ public class DDLWork implements Serializable {
     this.showPartsDesc = showPartsDesc;
   }
 
+  @Explain(displayName = "Show Create Database Operator",
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public ShowCreateDatabaseDesc getShowCreateDbDesc() {
+    return showCreateDbDesc;
+  }
+
+  public void setShowCreateDbDesc(ShowCreateDatabaseDesc showCreateDbDesc) {
+    this.showCreateDbDesc = showCreateDbDesc;
+  }
+
   /**
    * @return the showCreateTblDesc
    */
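
DDLWork only carries the new descriptor; DDLTask.execute() still has to dispatch on it, and that dispatch is part of the larger DDLTask change not reproduced in this excerpt. Assuming it mirrors the existing SHOW CREATE TABLE branch, it would look roughly like the following (the showCreateDatabase method name follows that pattern and is not taken from the patch text shown here):

      ShowCreateDatabaseDesc showCreateDb = work.getShowCreateDbDesc();
      if (showCreateDb != null) {
        return showCreateDatabase(db, showCreateDb);
      }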

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
index dee2136..af7e43e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
@@ -64,6 +64,7 @@ public enum HiveOperation {
   SHOWCOLUMNS("SHOWCOLUMNS", null, null),
   SHOW_TABLESTATUS("SHOW_TABLESTATUS", null, null),
   SHOW_TBLPROPERTIES("SHOW_TBLPROPERTIES", null, null),
+  SHOW_CREATEDATABASE("SHOW_CREATEDATABASE", new Privilege[]{Privilege.SELECT}, null),
   SHOW_CREATETABLE("SHOW_CREATETABLE", new Privilege[]{Privilege.SELECT}, null),
   SHOWFUNCTIONS("SHOWFUNCTIONS", null, null),
   SHOWINDEXES("SHOWINDEXES", null, null),

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
new file mode 100644
index 0000000..2b12691
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowCreateDatabaseDesc.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+import org.apache.hadoop.hive.ql.plan.Explain.Level;
+
+
+/**
+ * ShowCreateDatabaseDesc.
+ *
+ */
+@Explain(displayName = "Show Create Database",
+    explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+public class ShowCreateDatabaseDesc extends DDLDesc implements Serializable {
+  private static final long serialVersionUID = 1L;
+  String resFile;
+  String dbName;
+
+  /**
+   * Thrift DDL for the result of SHOW CREATE DATABASE.
+   */
+  private static final String schema = "createdb_stmt#string";
+
+  public String getSchema() {
+    return schema;
+  }
+
+  /**
+   * For serialization use only.
+   */
+  public ShowCreateDatabaseDesc() {
+  }
+
+  /**
+   * @param dbName
+   *          name of the database to show
+   * @param resFile
+   *          the file to write the result to
+   */
+  public ShowCreateDatabaseDesc(String dbName, String resFile) {
+    this.dbName = dbName;
+    this.resFile = resFile;
+  }
+
+  /**
+   * @return the resFile
+   */
+  @Explain(displayName = "result file", explainLevels = { Level.EXTENDED })
+  public String getResFile() {
+    return resFile;
+  }
+
+  /**
+   * @param resFile
+   *          the resFile to set
+   */
+  public void setResFile(String resFile) {
+    this.resFile = resFile;
+  }
+
+  /**
+   * @return the databaseName
+   */
+  @Explain(displayName = "database name",
+      explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
+  public String getDatabaseName() {
+    return dbName;
+  }
+
+  /**
+   * @param dbName
+   *          the dbName to set
+   */
+  public void setDatabaseName(String dbName) {
+    this.dbName = dbName;
+  }
+}
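
The descriptor holds only the database name and the result file; the CREATE DATABASE text seen in show_create_database.q.out below is produced by the DDLTask method that consumes it, which is not shown in this excerpt. A hedged sketch, with the method name and exact formatting inferred from the test output rather than copied from the patch, might look like:

    private int showCreateDatabase(Hive db, ShowCreateDatabaseDesc showCreateDb) throws HiveException {
      DataOutputStream outStream = getOutputStream(showCreateDb.getResFile());
      try {
        Database database = db.getDatabase(showCreateDb.getDatabaseName());
        StringBuilder createDbStmt = new StringBuilder();
        createDbStmt.append("CREATE DATABASE `").append(database.getName()).append("`\n");
        if (database.getDescription() != null) {
          createDbStmt.append("COMMENT\n  '")
              .append(escapeHiveCommand(database.getDescription())).append("'\n");
        }
        createDbStmt.append("LOCATION\n  '")
            .append(escapeHiveCommand(database.getLocationUri())).append("'\n");
        String propString = propertiesToString(database.getParameters(), null);
        if (!propString.isEmpty()) {
          createDbStmt.append("WITH DBPROPERTIES (\n").append(propString).append(")\n");
        }
        outStream.write(createDbStmt.toString().getBytes("UTF-8"));
      } catch (Exception e) {
        throw new HiveException(e, ErrorMsg.GENERIC_ERROR, showCreateDb.getDatabaseName());
      } finally {
        IOUtils.closeStream(outStream);
      }
      return 0;
    }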

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
index 71be469..5418e9a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
@@ -66,6 +66,7 @@ public enum HiveOperationType {
   SHOWCOLUMNS,
   SHOW_TABLESTATUS,
   SHOW_TBLPROPERTIES,
+  SHOW_CREATEDATABASE,
   SHOW_CREATETABLE,
   SHOWFUNCTIONS,
   SHOWINDEXES,

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
index 8e61d57..ca8f53f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
@@ -309,6 +309,8 @@ public class Operation2Privilege {
     // for now require select WITH GRANT
     op2Priv.put(HiveOperationType.SHOW_CREATETABLE, PrivRequirement.newIOPrivRequirement
 (SEL_GRANT_AR, null));
+    op2Priv.put(HiveOperationType.SHOW_CREATEDATABASE, PrivRequirement.newIOPrivRequirement
+(SEL_GRANT_AR, null));
 
     // for now allow only create-view with 'select with grant'
     // the owner will also have select with grant privileges on new view

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/test/queries/clientpositive/show_create_database.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/show_create_database.q b/ql/src/test/queries/clientpositive/show_create_database.q
new file mode 100644
index 0000000..6136f23
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/show_create_database.q
@@ -0,0 +1,3 @@
+CREATE DATABASE some_database comment 'for show create db test' WITH DBPROPERTIES ('somekey'='somevalue');
+SHOW CREATE DATABASE some_database;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/test/results/clientpositive/show_create_database.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/show_create_database.q.out b/ql/src/test/results/clientpositive/show_create_database.q.out
new file mode 100644
index 0000000..4755d2d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/show_create_database.q.out
@@ -0,0 +1,19 @@
+PREHOOK: query: CREATE DATABASE some_database comment 'for show create db test' WITH DBPROPERTIES ('somekey'='somevalue')
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:some_database
+POSTHOOK: query: CREATE DATABASE some_database comment 'for show create db test' WITH DBPROPERTIES ('somekey'='somevalue')
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:some_database
+PREHOOK: query: SHOW CREATE DATABASE some_database
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:some_database
+POSTHOOK: query: SHOW CREATE DATABASE some_database
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:some_database
+CREATE DATABASE `some_database`
+COMMENT
+  'for show create db test'
+LOCATION
+#### A masked pattern was here ####
+WITH DBPROPERTIES (
+  'somekey'='somevalue')

http://git-wip-us.apache.org/repos/asf/hive/blob/3cf7bd9e/ql/src/test/results/clientpositive/tez/show_create_database.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/show_create_database.q.out b/ql/src/test/results/clientpositive/tez/show_create_database.q.out
new file mode 100644
index 0000000..4755d2d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/show_create_database.q.out
@@ -0,0 +1,19 @@
+PREHOOK: query: CREATE DATABASE some_database comment 'for show create db test' WITH DBPROPERTIES ('somekey'='somevalue')
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:some_database
+POSTHOOK: query: CREATE DATABASE some_database comment 'for show create db test' WITH DBPROPERTIES ('somekey'='somevalue')
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:some_database
+PREHOOK: query: SHOW CREATE DATABASE some_database
+PREHOOK: type: SHOW_CREATEDATABASE
+PREHOOK: Input: database:some_database
+POSTHOOK: query: SHOW CREATE DATABASE some_database
+POSTHOOK: type: SHOW_CREATEDATABASE
+POSTHOOK: Input: database:some_database
+CREATE DATABASE `some_database`
+COMMENT
+  'for show create db test'
+LOCATION
+#### A masked pattern was here ####
+WITH DBPROPERTIES (
+  'somekey'='somevalue')


[41/41] hive git commit: HIVE-11886 : LLAP: merge master into branch (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11886 : LLAP: merge master into branch (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f324305a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f324305a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f324305a

Branch: refs/heads/llap
Commit: f324305a71ac31faa568b8a0078b1e9b217a3570
Parents: 79c7031 e9c8d7c
Author: Sergey Shelukhin <se...@apache.org>
Authored: Fri Sep 18 13:35:36 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Fri Sep 18 13:35:36 2015 -0700

----------------------------------------------------------------------
 .../benchmark/serde/LazySimpleSerDeBench.java   |  453 ++++
 .../hive/ql/security/FolderPermissionBase.java  |   17 +-
 .../test/resources/testconfiguration.properties |    2 +
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |    9 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |    3 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    6 +
 pom.xml                                         |   22 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |  269 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   19 +-
 .../ql/exec/persistence/PTFRowContainer.java    |   14 +-
 .../hive/ql/exec/persistence/RowContainer.java  |   12 +-
 .../ql/exec/tez/tools/KeyValuesInputMerger.java |    1 -
 .../ql/exec/vector/VectorizationContext.java    |   10 +-
 .../hadoop/hive/ql/hooks/LineageLogger.java     |   95 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  150 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSerde.java  |    1 +
 .../apache/hadoop/hive/ql/io/orc/OrcStruct.java |    2 +-
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |    4 +-
 .../hive/ql/io/parquet/ProjectionPusher.java    |    3 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |    4 +
 .../apache/hadoop/hive/ql/lib/RuleRegExp.java   |   61 +-
 .../ql/optimizer/ColumnPrunerProcFactory.java   |    3 +
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |    4 +-
 .../calcite/reloperators/HiveBetween.java       |   75 +
 .../optimizer/calcite/reloperators/HiveIn.java  |   41 +
 .../rules/HiveAggregateProjectMergeRule.java    |  151 ++
 .../calcite/rules/HivePreFilteringRule.java     |   37 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |  145 +-
 .../translator/PlanModifierForASTConv.java      |    4 +-
 .../translator/SqlFunctionConverter.java        |   16 +-
 .../hive/ql/optimizer/lineage/LineageCtx.java   |    8 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   11 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |   17 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    7 +-
 .../apache/hadoop/hive/ql/parse/QBSubQuery.java |    7 -
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |    2 +
 .../hive/ql/parse/SemanticAnalyzerFactory.java  |    2 +
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |   11 -
 .../org/apache/hadoop/hive/ql/plan/DDLWork.java |   21 +
 .../hadoop/hive/ql/plan/HiveOperation.java      |    1 +
 .../hive/ql/plan/ShowCreateDatabaseDesc.java    |   94 +
 .../authorization/plugin/HiveOperationType.java |    1 +
 .../plugin/sqlstd/Operation2Privilege.java      |    2 +
 .../org/apache/hadoop/hive/ql/udf/UDFJson.java  |    2 +
 .../hive/ql/udf/generic/GenericUDAFMax.java     |   16 +-
 .../exec/persistence/TestPTFRowContainer.java   |   31 +-
 .../hadoop/hive/ql/io/orc/TestOrcStruct.java    |    2 +
 .../clientpositive/drop_table_with_index.q      |   35 +
 .../queries/clientpositive/exchgpartition2lel.q |   32 +
 ql/src/test/queries/clientpositive/lineage3.q   |   26 +
 .../test/queries/clientpositive/load_orc_part.q |    5 +
 .../clientpositive/show_create_database.q       |    3 +
 .../queries/clientpositive/subquery_views.q     |   22 +-
 .../queries/clientpositive/vector_char_cast.q   |    9 +
 .../queries/clientpositive/windowing_udaf.q     |    4 +
 .../subquery_exists_implicit_gby.q.out          |    8 +-
 .../subquery_nested_subquery.q.out              |    4 +-
 .../subquery_notexists_implicit_gby.q.out       |    8 +-
 .../subquery_windowing_corr.q.out               |    7 +-
 .../alter_partition_coltype.q.out               |    8 +-
 .../clientpositive/annotate_stats_groupby.q.out |  106 +-
 .../annotate_stats_groupby2.q.out               |   28 +-
 .../results/clientpositive/auto_join18.q.out    |   12 +-
 .../auto_join18_multi_distinct.q.out            |   12 +-
 .../results/clientpositive/auto_join27.q.out    |   18 +-
 .../results/clientpositive/auto_join32.q.out    |    4 +-
 .../clientpositive/binarysortable_1.q.out       |  Bin 4329 -> 4325 bytes
 .../clientpositive/correlationoptimizer2.q.out  |  220 +-
 .../clientpositive/correlationoptimizer6.q.out  |  232 +-
 ql/src/test/results/clientpositive/count.q.out  |   14 +-
 .../results/clientpositive/ctas_colname.q.out   |   52 +-
 .../test/results/clientpositive/database.q.out  |    2 +-
 .../clientpositive/decimal_precision.q.out      |    4 +-
 .../results/clientpositive/decimal_udf.q.out    |   30 +-
 .../results/clientpositive/distinct_stats.q.out |   14 +-
 .../clientpositive/drop_table_with_index.q.out  |  152 ++
 .../dynpart_sort_opt_vectorization.q.out        |  105 +-
 .../dynpart_sort_optimization.q.out             |  105 +-
 ...ryption_select_read_only_encrypted_tbl.q.out |    4 +-
 .../clientpositive/exchgpartition2lel.q.out     |  182 ++
 .../clientpositive/explain_logical.q.out        |   78 +-
 .../clientpositive/fetch_aggregation.q.out      |    4 +-
 .../test/results/clientpositive/gby_star.q.out  |   54 +-
 .../test/results/clientpositive/groupby12.q.out |    6 +-
 .../results/clientpositive/groupby5_map.q.out   |    4 +-
 .../clientpositive/groupby5_map_skew.q.out      |    4 +-
 .../results/clientpositive/groupby_cube1.q.out  |   12 +-
 .../groupby_distinct_samekey.q.out              |    6 +-
 .../clientpositive/groupby_grouping_sets2.q.out |   10 +-
 .../clientpositive/groupby_grouping_sets3.q.out |   12 +-
 .../clientpositive/groupby_grouping_sets5.q.out |    8 +-
 .../clientpositive/groupby_grouping_sets6.q.out |    8 +-
 .../clientpositive/groupby_position.q.out       |   36 +-
 .../clientpositive/groupby_resolution.q.out     |   60 +-
 .../clientpositive/groupby_rollup1.q.out        |   12 +-
 .../clientpositive/groupby_sort_10.q.out        |    8 +-
 .../clientpositive/groupby_sort_11.q.out        |   10 +-
 .../results/clientpositive/groupby_sort_8.q.out |   12 +-
 ql/src/test/results/clientpositive/having.q.out |   62 +-
 .../test/results/clientpositive/having2.q.out   |   12 +-
 .../clientpositive/index_auto_mult_tables.q.out |   12 +-
 .../clientpositive/index_auto_self_join.q.out   |   12 +-
 .../clientpositive/index_auto_update.q.out      |    6 +-
 .../index_bitmap_auto_partitioned.q.out         |    6 +-
 .../index_bitmap_compression.q.out              |    6 +-
 .../infer_bucket_sort_dyn_part.q.out            |    4 +-
 .../infer_bucket_sort_map_operators.q.out       |    4 +-
 ql/src/test/results/clientpositive/join18.q.out |   12 +-
 .../clientpositive/join18_multi_distinct.q.out  |   12 +-
 ql/src/test/results/clientpositive/join31.q.out |   36 +-
 .../limit_partition_metadataonly.q.out          |    4 +-
 .../results/clientpositive/limit_pushdown.q.out |   36 +-
 .../test/results/clientpositive/lineage2.q.out  |    2 +-
 .../test/results/clientpositive/lineage3.q.out  |   72 +-
 .../list_bucket_query_multiskew_3.q.out         |    2 +-
 .../results/clientpositive/load_orc_part.q.out  |   26 +
 .../clientpositive/mapjoin_mapjoin.q.out        |   32 +-
 .../clientpositive/metadata_only_queries.q.out  |    4 +-
 .../results/clientpositive/metadataonly1.q.out  |  112 +-
 .../results/clientpositive/multiMapJoin2.q.out  |  226 +-
 .../nonblock_op_deduplicate.q.out               |    8 +-
 .../results/clientpositive/nonmr_fetch.q.out    |   14 +-
 .../clientpositive/partition_multilevels.q.out  |    8 +-
 .../test/results/clientpositive/ppd_gby.q.out   |   12 +-
 .../test/results/clientpositive/ppd_gby2.q.out  |   60 +-
 .../clientpositive/ppd_join_filter.q.out        |   98 +-
 .../ql_rewrite_gbtoidx_cbo_1.q.out              |  168 +-
 .../ql_rewrite_gbtoidx_cbo_2.q.out              |   94 +-
 .../reduce_deduplicate_extended.q.out           |   32 +-
 .../clientpositive/selectDistinctStar.q.out     |   44 +-
 .../clientpositive/show_create_database.q.out   |   19 +
 .../clientpositive/spark/auto_join18.q.out      |   10 +-
 .../spark/auto_join18_multi_distinct.q.out      |   12 +-
 .../clientpositive/spark/auto_join27.q.out      |   18 +-
 .../clientpositive/spark/auto_join32.q.out      |   53 +-
 .../results/clientpositive/spark/count.q.out    |   14 +-
 .../clientpositive/spark/groupby5_map.q.out     |    4 +-
 .../spark/groupby5_map_skew.q.out               |    4 +-
 .../clientpositive/spark/groupby_cube1.q.out    |   12 +-
 .../clientpositive/spark/groupby_position.q.out |   18 +-
 .../spark/groupby_resolution.q.out              |   60 +-
 .../clientpositive/spark/groupby_rollup1.q.out  |   12 +-
 .../results/clientpositive/spark/having.q.out   |   62 +-
 .../spark/infer_bucket_sort_map_operators.q.out |    4 +-
 .../results/clientpositive/spark/join18.q.out   |   10 +-
 .../spark/join18_multi_distinct.q.out           |   12 +-
 .../results/clientpositive/spark/join31.q.out   |   36 +-
 .../spark/limit_partition_metadataonly.q.out    |    4 +-
 .../clientpositive/spark/limit_pushdown.q.out   |   34 +-
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |   24 +-
 .../spark/metadata_only_queries.q.out           |    4 +-
 .../clientpositive/spark/ppd_join_filter.q.out  |   90 +-
 .../spark/ql_rewrite_gbtoidx_cbo_1.q.out        |  168 +-
 .../clientpositive/spark/stats_only_null.q.out  |    8 +-
 .../clientpositive/spark/subquery_in.q.out      |   36 +-
 .../results/clientpositive/spark/union11.q.out  |   42 +-
 .../results/clientpositive/spark/union14.q.out  |   28 +-
 .../results/clientpositive/spark/union15.q.out  |   28 +-
 .../results/clientpositive/spark/union28.q.out  |    4 +-
 .../results/clientpositive/spark/union30.q.out  |    4 +-
 .../results/clientpositive/spark/union33.q.out  |    8 +-
 .../results/clientpositive/spark/union5.q.out   |   34 +-
 .../results/clientpositive/spark/union7.q.out   |   28 +-
 .../clientpositive/spark/union_remove_21.q.out  |    4 +-
 .../spark/vector_count_distinct.q.out           |    4 +-
 .../spark/vector_decimal_aggregate.q.out        |   12 +-
 .../spark/vector_distinct_2.q.out               |   28 +-
 .../clientpositive/spark/vector_groupby_3.q.out |   30 +-
 .../spark/vector_mapjoin_reduce.q.out           |   36 +-
 .../clientpositive/spark/vector_orderby_5.q.out |    6 +-
 .../clientpositive/spark/vectorization_0.q.out  |   16 +-
 .../clientpositive/spark/vectorization_13.q.out |   32 +-
 .../clientpositive/spark/vectorization_15.q.out |   16 +-
 .../clientpositive/spark/vectorization_16.q.out |   16 +-
 .../clientpositive/spark/vectorization_9.q.out  |   16 +-
 .../spark/vectorization_pushdown.q.out          |    4 +-
 .../spark/vectorization_short_regress.q.out     |   74 +-
 .../spark/vectorized_nested_mapjoin.q.out       |   17 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   12 +-
 .../clientpositive/stats_only_null.q.out        |    8 +-
 .../results/clientpositive/stats_ppr_all.q.out  |   16 +-
 .../subq_where_serialization.q.out              |   18 +-
 .../clientpositive/subquery_exists_having.q.out |   48 +-
 .../results/clientpositive/subquery_in.q.out    |   36 +-
 .../clientpositive/subquery_in_having.q.out     |  260 +-
 .../clientpositive/subquery_notexists.q.out     |   18 +-
 .../subquery_notexists_having.q.out             |   26 +-
 .../results/clientpositive/subquery_notin.q.out |   24 +-
 .../subquery_notin_having.q.java1.7.out         |   50 +-
 .../subquery_unqualcolumnrefs.q.out             |   74 +-
 .../results/clientpositive/subquery_views.q.out |  124 +-
 .../test/results/clientpositive/tez/count.q.out |   14 +-
 .../tez/dynamic_partition_pruning.q.out         |   88 +-
 .../tez/dynpart_sort_opt_vectorization.q.out    |   90 +-
 .../tez/dynpart_sort_optimization.q.out         |   89 +-
 .../clientpositive/tez/explainuser_1.q.out      | 2319 +++++++++---------
 .../clientpositive/tez/explainuser_2.q.out      |  782 +++---
 .../results/clientpositive/tez/having.q.out     |   62 +-
 .../clientpositive/tez/limit_pushdown.q.out     |   34 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    |   24 +-
 .../tez/metadata_only_queries.q.out             |    4 +-
 .../clientpositive/tez/metadataonly1.q.out      |   44 +-
 .../test/results/clientpositive/tez/mrr.q.out   |   94 +-
 .../clientpositive/tez/selectDistinctStar.q.out |   44 +-
 .../tez/show_create_database.q.out              |   19 +
 .../clientpositive/tez/stats_only_null.q.out    |    8 +-
 .../clientpositive/tez/subquery_in.q.out        |   36 +-
 .../results/clientpositive/tez/tez_dml.q.out    |    6 +-
 .../results/clientpositive/tez/union5.q.out     |   44 +-
 .../results/clientpositive/tez/union7.q.out     |   28 +-
 .../clientpositive/tez/unionDistinct_1.q.out    |    8 +-
 .../clientpositive/tez/vector_aggregate_9.q.out |    4 +-
 .../tez/vector_binary_join_groupby.q.out        |    4 +-
 .../clientpositive/tez/vector_char_cast.q.out   |   35 +
 .../tez/vector_count_distinct.q.out             |    4 +-
 .../tez/vector_decimal_aggregate.q.out          |   12 +-
 .../tez/vector_decimal_precision.q.out          |    4 +-
 .../clientpositive/tez/vector_decimal_udf.q.out |   30 +-
 .../clientpositive/tez/vector_distinct_2.q.out  |   28 +-
 .../clientpositive/tez/vector_groupby_3.q.out   |   30 +-
 .../tez/vector_groupby_reduce.q.out             |    8 +-
 .../tez/vector_grouping_sets.q.out              |    8 +-
 .../tez/vector_mapjoin_reduce.q.out             |   36 +-
 .../clientpositive/tez/vector_orderby_5.q.out   |    6 +-
 .../clientpositive/tez/vector_outer_join2.q.out |   20 +-
 .../tez/vector_partition_diff_num_cols.q.out    |   20 +-
 .../tez/vector_partitioned_date_time.q.out      |   12 +-
 .../tez/vector_reduce_groupby_decimal.q.out     |   24 +-
 .../clientpositive/tez/vectorization_0.q.out    |   16 +-
 .../clientpositive/tez/vectorization_13.q.out   |   32 +-
 .../clientpositive/tez/vectorization_15.q.out   |   16 +-
 .../clientpositive/tez/vectorization_16.q.out   |   16 +-
 .../clientpositive/tez/vectorization_9.q.out    |   16 +-
 .../tez/vectorization_limit.q.out               |   14 +-
 .../tez/vectorization_pushdown.q.out            |    4 +-
 .../tez/vectorization_short_regress.q.out       |   74 +-
 .../tez/vectorized_distinct_gby.q.out           |    8 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   88 +-
 .../tez/vectorized_nested_mapjoin.q.out         |   18 +-
 .../clientpositive/tez/vectorized_parquet.q.out |    6 +-
 .../tez/vectorized_timestamp_funcs.q.out        |   12 +-
 ql/src/test/results/clientpositive/udf8.q.out   |    4 +-
 .../test/results/clientpositive/udf_count.q.out |   16 +-
 .../test/results/clientpositive/union11.q.out   |   70 +-
 .../test/results/clientpositive/union14.q.out   |   32 +-
 .../test/results/clientpositive/union15.q.out   |   38 +-
 .../test/results/clientpositive/union28.q.out   |    8 +-
 .../test/results/clientpositive/union30.q.out   |    8 +-
 .../test/results/clientpositive/union33.q.out   |    8 +-
 ql/src/test/results/clientpositive/union5.q.out |   48 +-
 ql/src/test/results/clientpositive/union7.q.out |   32 +-
 .../clientpositive/unionDistinct_1.q.out        |    8 +-
 .../clientpositive/union_remove_21.q.out        |    8 +-
 .../clientpositive/vector_aggregate_9.q.out     |    4 +-
 .../vector_aggregate_without_gby.q.out          |    4 +-
 .../vector_binary_join_groupby.q.out            |    4 +-
 .../clientpositive/vector_char_cast.q.out       |   35 +
 .../clientpositive/vector_count_distinct.q.out  |    6 +-
 .../vector_decimal_aggregate.q.out              |   12 +-
 .../vector_decimal_precision.q.out              |    4 +-
 .../clientpositive/vector_decimal_udf.q.out     |   30 +-
 .../clientpositive/vector_distinct_2.q.out      |   28 +-
 .../clientpositive/vector_groupby_3.q.out       |   30 +-
 .../clientpositive/vector_groupby_reduce.q.out  |    8 +-
 .../clientpositive/vector_grouping_sets.q.out   |    8 +-
 .../clientpositive/vector_left_outer_join.q.out |    8 +-
 .../clientpositive/vector_mapjoin_reduce.q.out  |   36 +-
 .../clientpositive/vector_orderby_5.q.out       |    6 +-
 .../clientpositive/vector_outer_join1.q.out     |    8 +-
 .../clientpositive/vector_outer_join2.q.out     |   28 +-
 .../clientpositive/vector_outer_join3.q.out     |   24 +-
 .../clientpositive/vector_outer_join4.q.out     |    8 +-
 .../clientpositive/vector_outer_join5.q.out     |   48 +-
 .../vector_partition_diff_num_cols.q.out        |   20 +-
 .../vector_partitioned_date_time.q.out          |   12 +-
 .../vector_reduce_groupby_decimal.q.out         |   24 +-
 .../clientpositive/vectorization_0.q.out        |   16 +-
 .../clientpositive/vectorization_13.q.out       |   32 +-
 .../clientpositive/vectorization_15.q.out       |   16 +-
 .../clientpositive/vectorization_16.q.out       |   16 +-
 .../clientpositive/vectorization_9.q.out        |   16 +-
 .../clientpositive/vectorization_limit.q.out    |   16 +-
 .../clientpositive/vectorization_pushdown.q.out |    4 +-
 .../vectorization_short_regress.q.out           |   74 +-
 .../vectorized_distinct_gby.q.out               |   12 +-
 .../vectorized_nested_mapjoin.q.out             |   26 +-
 .../clientpositive/vectorized_parquet.q.out     |    6 +-
 .../vectorized_parquet_types.q.out              |    6 +-
 .../vectorized_timestamp_funcs.q.out            |   12 +-
 .../results/clientpositive/windowing_udaf.q.out |   12 +
 .../hive/serde2/ColumnProjectionUtils.java      |   22 +
 .../hadoop/hive/serde2/lazy/LazyByte.java       |    4 +
 .../hadoop/hive/serde2/lazy/LazyDouble.java     |    4 +
 .../hadoop/hive/serde2/lazy/LazyFloat.java      |    4 +
 .../hadoop/hive/serde2/lazy/LazyInteger.java    |    4 +
 .../hadoop/hive/serde2/lazy/LazyLong.java       |    4 +
 .../hadoop/hive/serde2/lazy/LazyShort.java      |    4 +
 .../hadoop/hive/serde2/lazy/LazyUtils.java      |   28 +
 .../org/apache/hive/service/cli/Column.java     |    2 +-
 .../org/apache/hive/service/cli/TestColumn.java |  129 +
 .../hive/ql/io/sarg/SearchArgumentFactory.java  |    5 +-
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |    7 +-
 302 files changed, 7055 insertions(+), 5389 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --cc llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 4f7bb78,0000000..c934f39
mode 100644,000000..100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@@ -1,949 -1,0 +1,950 @@@
 +package org.apache.hadoop.hive.llap.io.encoded;
 +
 +import java.io.IOException;
 +import java.nio.ByteBuffer;
 +import java.util.ArrayList;
 +import java.util.Arrays;
 +import java.util.Collections;
 +import java.util.List;
 +
 +import org.apache.commons.logging.Log;
 +import org.apache.commons.logging.LogFactory;
 +import org.apache.hadoop.conf.Configuration;
 +import org.apache.hadoop.fs.FileSystem;
 +import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.hive.common.CallableWithNdc;
 +import org.apache.hadoop.hive.common.Pool;
 +import org.apache.hadoop.hive.common.Pool.PoolObjectHelper;
 +import org.apache.hadoop.hive.common.io.DataCache;
 +import org.apache.hadoop.hive.common.io.Allocator;
 +import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
 +import org.apache.hadoop.hive.common.io.DiskRange;
 +import org.apache.hadoop.hive.common.io.DiskRangeList;
 +import org.apache.hadoop.hive.common.io.encoded.MemoryBuffer;
 +import org.apache.hadoop.hive.conf.HiveConf;
 +import org.apache.hadoop.hive.llap.ConsumerFeedback;
 +import org.apache.hadoop.hive.llap.DebugUtils;
 +import org.apache.hadoop.hive.llap.cache.Cache;
 +import org.apache.hadoop.hive.llap.cache.LowLevelCache;
 +import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 +import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
 +import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters.Counter;
 +import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 +import org.apache.hadoop.hive.llap.io.decode.OrcEncodedDataConsumer;
 +import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
 +import org.apache.hadoop.hive.llap.io.metadata.OrcMetadataCache;
 +import org.apache.hadoop.hive.llap.io.metadata.OrcStripeMetadata;
 +import org.apache.hadoop.hive.ql.exec.DDLTask;
 +import org.apache.hadoop.hive.ql.io.AcidUtils;
 +import org.apache.hadoop.hive.ql.io.HdfsUtils;
 +import org.apache.hadoop.hive.ql.io.orc.CompressionKind;
 +import org.apache.hadoop.hive.ql.io.orc.DataReader;
 +import org.apache.hadoop.hive.ql.io.orc.MetadataReader;
 +import org.apache.hadoop.hive.ql.io.orc.OrcFile;
 +import org.apache.hadoop.hive.ql.io.orc.OrcFile.ReaderOptions;
 +import org.apache.hadoop.hive.ql.io.orc.OrcConf;
 +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 +import org.apache.hadoop.hive.ql.io.orc.OrcProto;
 +import org.apache.hadoop.hive.ql.io.orc.OrcSplit;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.Reader;
 +import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl;
 +import org.apache.hadoop.hive.ql.io.orc.RecordReaderImpl.SargApplier;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedOrcFile;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedReader;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.OrcCacheKey;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
 +import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory;
 +import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
 +import org.apache.hadoop.hive.ql.io.orc.StripeInformation;
 +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 +import org.apache.hadoop.mapred.FileSplit;
 +import org.apache.hadoop.mapred.InputSplit;
 +import org.apache.hive.common.util.FixedSizedObjectPool;
 +
 +/**
 + * This produces EncodedColumnBatch via ORC EncodedDataImpl.
 + * It serves as Consumer for EncodedColumnBatch too, for the high-level cache scenario where
 + * it inserts itself into the pipeline to put the data in cache, before passing it to the real
 + * consumer. It also serves as ConsumerFeedback that receives processed EncodedColumnBatch-es.
 + */
 +public class OrcEncodedDataReader extends CallableWithNdc<Void>
 +    implements ConsumerFeedback<OrcEncodedColumnBatch>, Consumer<OrcEncodedColumnBatch> {
 +  private static final Log LOG = LogFactory.getLog(OrcEncodedDataReader.class);
 +  public static final FixedSizedObjectPool<ColumnStreamData> CSD_POOL =
 +      new FixedSizedObjectPool<>(8192, new PoolObjectHelper<ColumnStreamData>() {
 +        @Override
 +        public ColumnStreamData create() {
 +          return new ColumnStreamData();
 +        }
 +        @Override
 +        public void resetBeforeOffer(ColumnStreamData t) {
 +          t.reset();
 +        }
 +      });
 +  public static final FixedSizedObjectPool<OrcEncodedColumnBatch> ECB_POOL =
 +      new FixedSizedObjectPool<>(1024, new PoolObjectHelper<OrcEncodedColumnBatch>() {
 +        @Override
 +        public OrcEncodedColumnBatch create() {
 +          return new OrcEncodedColumnBatch();
 +        }
 +        @Override
 +        public void resetBeforeOffer(OrcEncodedColumnBatch t) {
 +          t.reset();
 +        }
 +      });
 +  private static final PoolFactory POOL_FACTORY = new PoolFactory() {
 +    @Override
 +    public <T> Pool<T> createPool(int size, PoolObjectHelper<T> helper) {
 +      return new FixedSizedObjectPool<>(size, helper);
 +    }
 +
 +    @Override
 +    public Pool<ColumnStreamData> createColumnStreamDataPool() {
 +      return CSD_POOL;
 +    }
 +
 +    @Override
 +    public Pool<OrcEncodedColumnBatch> createEncodedColumnBatchPool() {
 +      return ECB_POOL;
 +    }
 +  };
 +
 +  private final OrcMetadataCache metadataCache;
 +  private final LowLevelCache lowLevelCache;
 +  private final Configuration conf;
 +  private final Cache<OrcCacheKey> cache;
 +  private final FileSplit split;
 +  private List<Integer> columnIds;
 +  private final SearchArgument sarg;
 +  private final String[] columnNames;
 +  private final OrcEncodedDataConsumer consumer;
 +  private final QueryFragmentCounters counters;
 +
 +  // Read state.
 +  private int stripeIxFrom;
 +  private OrcFileMetadata fileMetadata;
 +  private Reader orcReader;
 +  private MetadataReader metadataReader;
 +  private EncodedReader stripeReader;
 +  private long fileId;
 +  private FileSystem fs;
 +  /**
 +   * readState[stripeIx'][colIx'] => boolean array (could be a bitmask) of rg-s that need to be
 +   * read. Contains only stripes that are read, and only columns included. null => read all RGs.
 +   */
 +  private boolean[][][] readState;
 +  private volatile boolean isStopped = false;
 +  @SuppressWarnings("unused")
 +  private volatile boolean isPaused = false;
 +
 +  public OrcEncodedDataReader(LowLevelCache lowLevelCache, Cache<OrcCacheKey> cache,
 +      OrcMetadataCache metadataCache, Configuration conf, InputSplit split,
 +      List<Integer> columnIds, SearchArgument sarg, String[] columnNames,
 +      OrcEncodedDataConsumer consumer, QueryFragmentCounters counters) {
 +    this.lowLevelCache = lowLevelCache;
 +    this.metadataCache = metadataCache;
 +    this.cache = cache;
 +    this.conf = conf;
 +    this.split = (FileSplit)split;
 +    this.columnIds = columnIds;
 +    if (this.columnIds != null) {
 +      Collections.sort(this.columnIds);
 +    }
 +    this.sarg = sarg;
 +    this.columnNames = columnNames;
 +    this.consumer = consumer;
 +    this.counters = counters;
 +  }
 +
 +  @Override
 +  public void stop() {
 +    if (LOG.isInfoEnabled()) {
 +      LOG.info("Encoded reader is being stopped");
 +    }
 +    isStopped = true;
 +  }
 +
 +  @Override
 +  public void pause() {
 +    isPaused = true;
 +    // TODO: pause fetching
 +  }
 +
 +  @Override
 +  public void unpause() {
 +    isPaused = false;
 +    // TODO: unpause fetching
 +  }
 +
 +  @Override
 +  protected Void callInternal() throws IOException {
 +    long startTime = counters.startTimeCounter();
 +    if (LlapIoImpl.LOGL.isInfoEnabled()) {
 +      LlapIoImpl.LOG.info("Processing data for " + split.getPath());
 +    }
 +    if (processStop()) {
 +      recordReaderTime(startTime);
 +      return null;
 +    }
 +    counters.setDesc(QueryFragmentCounters.Desc.TABLE, getDbAndTableName(split.getPath()));
 +    orcReader = null;
 +    // 1. Get file metadata from cache, or create the reader and read it.
 +    // Don't cache the filesystem object for now; Tez closes it and FS cache will fix all that
 +    fs = split.getPath().getFileSystem(conf);
 +    fileId = determineFileId(fs, split);
 +    counters.setDesc(QueryFragmentCounters.Desc.FILE, fileId);
 +
 +    try {
 +      fileMetadata = getOrReadFileMetadata();
 +      consumer.setFileMetadata(fileMetadata);
 +      validateFileMetadata();
 +      if (columnIds == null) {
 +        columnIds = createColumnIds(fileMetadata);
 +      }
 +
 +      // 2. Determine which stripes to read based on the split.
 +      determineStripesToRead();
 +    } catch (Throwable t) {
 +      recordReaderTime(startTime);
 +      consumer.setError(t);
 +      return null;
 +    }
 +
 +    if (readState.length == 0) {
 +      consumer.setDone();
 +      recordReaderTime(startTime);
 +      return null; // No data to read.
 +    }
 +    counters.setDesc(QueryFragmentCounters.Desc.STRIPES, stripeIxFrom + "," + readState.length);
 +
 +    // 3. Apply SARG if needed, and otherwise determine what RGs to read.
 +    int stride = fileMetadata.getRowIndexStride();
 +    ArrayList<OrcStripeMetadata> stripeMetadatas = null;
 +    boolean[] globalIncludes = null;
 +    boolean[] sargColumns = null;
 +    try {
 +      globalIncludes = OrcInputFormat.genIncludedColumns(fileMetadata.getTypes(), columnIds, true);
 +      if (sarg != null && stride != 0) {
 +        // TODO: move this to a common method
-         int[] filterColumns = RecordReaderImpl.mapSargColumns(sarg.getLeaves(), columnNames, 0);
++        int[] filterColumns = RecordReaderImpl.mapSargColumnsToOrcInternalColIdx(
++          sarg.getLeaves(), columnNames, 0);
 +        // included will not be null, row options will fill the array with trues if null
 +        sargColumns = new boolean[globalIncludes.length];
 +        for (int i : filterColumns) {
 +          // filter columns may have -1 as index which could be partition column in SARG.
 +          if (i > 0) {
 +            sargColumns[i] = true;
 +          }
 +        }
 +
 +        // If SARG is present, get relevant stripe metadata from cache or readers.
 +        stripeMetadatas = readStripesMetadata(globalIncludes, sargColumns);
 +      }
 +
 +      // Now, apply SARG if any; w/o sarg, this will just initialize readState.
 +      boolean hasData = determineRgsToRead(globalIncludes, stride, stripeMetadatas);
 +      if (!hasData) {
 +        consumer.setDone();
 +        recordReaderTime(startTime);
 +        return null; // No data to read.
 +      }
 +    } catch (Throwable t) {
 +      cleanupReaders();
 +      consumer.setError(t);
 +      recordReaderTime(startTime);
 +      return null;
 +    }
 +
 +    if (processStop()) {
 +      cleanupReaders();
 +      recordReaderTime(startTime);
 +      return null;
 +    }
 +
 +    // 4. Get data from high-level cache.
 +    //    If some cols are fully in cache, this will also give us the modified list of columns to
 +    //    read for every stripe (null means read all of them - the usual path). In any case,
 +    //    readState will be modified for column x rgs that were fetched from high-level cache.
 +    List<Integer>[] stripeColsToRead = null;
 +    if (cache != null) {
 +      try {
 +        stripeColsToRead = produceDataFromCache(stride);
 +      } catch (Throwable t) {
 +        // produceDataFromCache handles its own cleanup.
 +        consumer.setError(t);
 +        cleanupReaders();
 +        recordReaderTime(startTime);
 +        return null;
 +      }
 +    }
 +
 +    // 5. Create encoded data reader.
 +    // In case if we have high-level cache, we will intercept the data and add it there;
 +    // otherwise just pass the data directly to the consumer.
 +    Consumer<OrcEncodedColumnBatch> dataConsumer = (cache == null) ? this.consumer : this;
 +    try {
 +      ensureOrcReader();
 +      // Reader creation updates HDFS counters; don't do it here.
 +      DataWrapperForOrc dw = new DataWrapperForOrc();
 +      stripeReader = orcReader.encodedReader(fileId, dw, dw, POOL_FACTORY);
 +      stripeReader.setDebugTracing(DebugUtils.isTraceOrcEnabled());
 +    } catch (Throwable t) {
 +      consumer.setError(t);
 +      recordReaderTime(startTime);
 +      cleanupReaders();
 +      return null;
 +    }
 +
 +    // 6. Read data.
 +    // TODO: I/O threadpool could be here - one thread per stripe; for now, linear.
 +    OrcBatchKey stripeKey = new OrcBatchKey(fileId, -1, 0);
 +    for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
 +      if (processStop()) {
 +        cleanupReaders();
 +        recordReaderTime(startTime);
 +        return null;
 +      }
 +      int stripeIx = stripeIxFrom + stripeIxMod;
 +      boolean[][] colRgs = null;
 +      boolean[] stripeIncludes = null;
 +      OrcStripeMetadata stripeMetadata = null;
 +      StripeInformation stripe;
 +      try {
 +        List<Integer> cols = stripeColsToRead == null ? null : stripeColsToRead[stripeIxMod];
 +        if (cols != null && cols.isEmpty()) continue; // No need to read this stripe.
 +        stripe = fileMetadata.getStripes().get(stripeIx);
 +
 +        if (DebugUtils.isTraceOrcEnabled()) {
 +          LlapIoImpl.LOG.info("Reading stripe " + stripeIx + ": "
 +              + stripe.getOffset() + ", " + stripe.getLength());
 +        }
 +        colRgs = readState[stripeIxMod];
 +        // We assume that NO_RGS value is only set from SARG filter and for all columns;
 +        // intermediate changes for individual columns will unset values in the array.
 +        // Skip this case for 0-column read. We could probably special-case it just like we do
 +        // in EncodedReaderImpl, but for now it's not that important.
 +        if (colRgs.length > 0 && colRgs[0] == SargApplier.READ_NO_RGS) continue;
 +
 +        // 6.1. Determine the columns to read (usually the same as requested).
 +        if (cache == null || cols == null || cols.size() == colRgs.length) {
 +          cols = columnIds;
 +          stripeIncludes = globalIncludes;
 +        } else {
 +          // We are reading subset of the original columns, remove unnecessary bitmasks/etc.
 +          // This will never happen w/o high-level cache.
 +          stripeIncludes = OrcInputFormat.genIncludedColumns(fileMetadata.getTypes(), cols, true);
 +          colRgs = genStripeColRgs(cols, colRgs);
 +        }
 +
 +        // 6.2. Ensure we have stripe metadata. We might have read it before for RG filtering.
 +        boolean isFoundInCache = false;
 +        if (stripeMetadatas != null) {
 +          stripeMetadata = stripeMetadatas.get(stripeIxMod);
 +        } else {
 +          stripeKey.stripeIx = stripeIx;
 +          stripeMetadata = metadataCache.getStripeMetadata(stripeKey);
 +          isFoundInCache = (stripeMetadata != null);
 +          if (!isFoundInCache) {
 +            counters.incrCounter(Counter.METADATA_CACHE_MISS);
 +            ensureMetadataReader();
 +            long startTimeHdfs = counters.startTimeCounter();
 +            stripeMetadata = new OrcStripeMetadata(
 +                stripeKey, metadataReader, stripe, stripeIncludes, sargColumns);
 +            counters.incrTimeCounter(Counter.HDFS_TIME_US, startTimeHdfs);
 +            stripeMetadata = metadataCache.putStripeMetadata(stripeMetadata);
 +            if (DebugUtils.isTraceOrcEnabled()) {
 +              LlapIoImpl.LOG.info("Caching stripe " + stripeKey.stripeIx
 +                  + " metadata with includes: " + DebugUtils.toString(stripeIncludes));
 +            }
 +            stripeKey = new OrcBatchKey(fileId, -1, 0);
 +          }
 +          consumer.setStripeMetadata(stripeMetadata);
 +        }
 +        if (!stripeMetadata.hasAllIndexes(stripeIncludes)) {
 +          if (DebugUtils.isTraceOrcEnabled()) {
 +            LlapIoImpl.LOG.info("Updating indexes in stripe " + stripeKey.stripeIx
 +                + " metadata for includes: " + DebugUtils.toString(stripeIncludes));
 +          }
 +          assert isFoundInCache;
 +          counters.incrCounter(Counter.METADATA_CACHE_MISS);
 +          ensureMetadataReader();
 +          updateLoadedIndexes(stripeMetadata, stripe, stripeIncludes, sargColumns);
 +        } else if (isFoundInCache) {
 +          counters.incrCounter(Counter.METADATA_CACHE_HIT);
 +        }
 +      } catch (Throwable t) {
 +        consumer.setError(t);
 +        cleanupReaders();
 +        recordReaderTime(startTime);
 +        return null;
 +      }
 +      if (processStop()) {
 +        cleanupReaders();
 +        recordReaderTime(startTime);
 +        return null;
 +      }
 +
 +      // 6.3. Finally, hand off to the stripe reader to produce the data.
 +      //      This is a sync call that will feed data to the consumer.
 +      try {
 +        // TODO: readEncodedColumns is not supposed to throw; errors should be propagated through
 +        // the consumer. It is potentially holding locked buffers, and must perform its own cleanup.
 +        // Also, currently readEncodedColumns is not stoppable. The consumer will discard the
 +        // data it receives for one stripe. We could probably interrupt it, if it checked that.
 +        stripeReader.readEncodedColumns(stripeIx, stripe, stripeMetadata.getRowIndexes(),
 +            stripeMetadata.getEncodings(), stripeMetadata.getStreams(), stripeIncludes,
 +            colRgs, dataConsumer);
 +      } catch (Throwable t) {
 +        consumer.setError(t);
 +        cleanupReaders();
 +        recordReaderTime(startTime);
 +        return null;
 +      }
 +    }
 +
 +    // Done with all the things.
 +    recordReaderTime(startTime);
 +    dataConsumer.setDone();
 +    if (DebugUtils.isTraceMttEnabled()) {
 +      LlapIoImpl.LOG.info("done processing " + split);
 +    }
 +
 +    // Close the stripe reader, we are done reading.
 +    cleanupReaders();
 +    return null;
 +  }
 +
 +  private void recordReaderTime(long startTime) {
 +    counters.incrTimeCounter(Counter.TOTAL_IO_TIME_US, startTime);
 +  }
 +
 +  private static String getDbAndTableName(Path path) {
 +    // Ideally, we'd get this from the split; however, the split doesn't contain any such thing and it's
 +    // actually pretty hard to get because even the split generator only uses paths. We only need this
 +    // for metrics; therefore, brace for BLACK MAGIC!
 +    String[] parts = path.toUri().getPath().toString().split(Path.SEPARATOR);
 +    int dbIx = -1;
 +    // Try to find the default db postfix; don't check the last two components - at least there
 +    // should be a table and a file (we could also try to throw away partition/bucket/acid stuff).
 +    for (int i = 0; i < parts.length - 2; ++i) {
 +      if (!parts[i].endsWith(DDLTask.DATABASE_PATH_SUFFIX)) continue;
 +      if (dbIx >= 0) {
 +        dbIx = -1; // Let's not guess.
 +        break;
 +      }
 +      dbIx = i;
 +    }
 +    if (dbIx >= 0) {
 +      return parts[dbIx].substring(0, parts[dbIx].length() - 3) + "." + parts[dbIx + 1];
 +    }
 +
 +    // Just go from the back and throw away everything we think is wrong; skip last item, the file.
 +    boolean isInPartFields = false;
 +    for (int i = parts.length - 2; i >= 0; --i) {
 +      String p = parts[i];
 +      boolean isPartField = p.contains("=");
 +      if ((isInPartFields && !isPartField) || (!isPartField && !p.startsWith(AcidUtils.BASE_PREFIX)
 +          && !p.startsWith(AcidUtils.DELTA_PREFIX) && !p.startsWith(AcidUtils.BUCKET_PREFIX))) {
 +        dbIx = i - 1;
 +        break;
 +      }
 +      isInPartFields = isPartField;
 +    }
 +    // If we found something before we ran out of components, use it.
 +    if (dbIx >= 0) {
 +      String dbName = parts[dbIx];
 +      if (dbName.endsWith(DDLTask.DATABASE_PATH_SUFFIX)) {
 +        dbName = dbName.substring(0, dbName.length() - 3);
 +      }
 +      return dbName + "." + parts[dbIx + 1];
 +    }
 +    return "unknown";
 +  }
 +
 +  private void validateFileMetadata() throws IOException {
 +    if (fileMetadata.getCompressionKind() == CompressionKind.NONE) return;
 +    int bufferSize = fileMetadata.getCompressionBufferSize();
 +    int minAllocSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.LLAP_ORC_CACHE_MIN_ALLOC);
 +    if (bufferSize < minAllocSize) {
 +      LOG.warn("ORC compression buffer size (" + bufferSize + ") is smaller than LLAP low-level "
 +            + "cache minimum allocation size (" + minAllocSize + "). Decrease the value for "
 +            + HiveConf.ConfVars.LLAP_ORC_CACHE_MIN_ALLOC.toString() + " to avoid wasting memory");
 +    }
 +  }
 +
 +  private boolean processStop() {
 +    if (!isStopped) return false;
 +    LOG.info("Encoded data reader is stopping");
 +    cleanupReaders();
 +    return true;
 +  }
 +
 +  private static long determineFileId(FileSystem fs, FileSplit split) throws IOException {
 +    if (split instanceof OrcSplit) {
 +      Long fileId = ((OrcSplit)split).getFileId();
 +      if (fileId != null) {
 +        return fileId;
 +      }
 +    }
 +    LOG.warn("Split for " + split.getPath() + " (" + split.getClass() + ") does not have file ID");
 +    return HdfsUtils.getFileId(fs, split.getPath());
 +  }
 +
 +  private boolean[][] genStripeColRgs(List<Integer> stripeCols, boolean[][] globalColRgs) {
 +    boolean[][] stripeColRgs = new boolean[stripeCols.size()][];
 +    for (int i = 0, i2 = -1; i < globalColRgs.length; ++i) {
 +      if (globalColRgs[i] == null) continue;
 +      stripeColRgs[i2] = globalColRgs[i];
 +      ++i2;
 +    }
 +    return stripeColRgs;
 +  }
 +
 +  /**
 +   * Builds a column list containing all column indexes from the metadata, i.e. a list to read all columns.
 +   */
 +  private static List<Integer> createColumnIds(OrcFileMetadata metadata) {
 +    List<Integer> columnIds = new ArrayList<Integer>(metadata.getTypes().size());
 +    for (int i = 1; i < metadata.getTypes().size(); ++i) {
 +      columnIds.add(i);
 +    }
 +    return columnIds;
 +  }
 +
 +  /**
 +   * In case the stripe metadata in the cache does not have all the indexes needed for the current query,
 +   * loads the missing ones. This is a temporary kludge until a real metadata cache becomes available.
 +   */
 +  private void updateLoadedIndexes(OrcStripeMetadata stripeMetadata,
 +      StripeInformation stripe, boolean[] stripeIncludes, boolean[] sargColumns) throws IOException {
 +    // We only synchronize on write for now - the design of the metadata cache is very temporary;
 +    // we pre-allocate the array and never remove entries, so readers should be safe.
 +    synchronized (stripeMetadata) {
 +      if (stripeMetadata.hasAllIndexes(stripeIncludes)) return;
 +      long startTime = counters.startTimeCounter();
 +      stripeMetadata.loadMissingIndexes(metadataReader, stripe, stripeIncludes, sargColumns);
 +      counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
 +    }
 +  }
 +
 +  /**
 +   * Closes the metadata and stripe readers (on error, or when done reading).
 +   */
 +  private void cleanupReaders() {
 +    if (metadataReader != null) {
 +      try {
 +        metadataReader.close();
 +      } catch (IOException ex) {
 +        // Ignore.
 +      }
 +    }
 +    if (stripeReader != null) {
 +      try {
 +        stripeReader.close();
 +      } catch (IOException ex) {
 +        // Ignore.
 +      }
 +    }
 +  }
 +
 +  /**
 +   * Ensures orcReader is initialized for the split.
 +   */
 +  private void ensureOrcReader() throws IOException {
 +    if (orcReader != null) return;
 +    Path path = HdfsUtils.getFileIdPath(fs, split.getPath(), fileId);
 +    if (DebugUtils.isTraceOrcEnabled()) {
 +      LOG.info("Creating reader for " + path + " (" + split.getPath() + ")");
 +    }
 +    long startTime = counters.startTimeCounter();
 +    ReaderOptions opts = OrcFile.readerOptions(conf).filesystem(fs).fileMetadata(fileMetadata);
 +    orcReader = EncodedOrcFile.createReader(path, opts);
 +    counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
 +  }
 +
 +  /**
 +   *  Gets file metadata for the split from cache, or reads it from the file.
 +   */
 +  private OrcFileMetadata getOrReadFileMetadata() throws IOException {
 +    OrcFileMetadata metadata = metadataCache.getFileMetadata(fileId);
 +    if (metadata != null) {
 +      counters.incrCounter(Counter.METADATA_CACHE_HIT);
 +      return metadata;
 +    }
 +    counters.incrCounter(Counter.METADATA_CACHE_MISS);
 +    ensureOrcReader();
 +    // We assume this call doesn't touch HDFS because everything is already read; don't add time.
 +    metadata = new OrcFileMetadata(fileId, orcReader);
 +    return metadataCache.putFileMetadata(metadata);
 +  }
 +
 +  /**
 +   * Reads the metadata for all stripes in the file.
 +   */
 +  private ArrayList<OrcStripeMetadata> readStripesMetadata(
 +      boolean[] globalInc, boolean[] sargColumns) throws IOException {
 +    ArrayList<OrcStripeMetadata> result = new ArrayList<OrcStripeMetadata>(readState.length);
 +    OrcBatchKey stripeKey = new OrcBatchKey(fileId, 0, 0);
 +    for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
 +      stripeKey.stripeIx = stripeIxMod + stripeIxFrom;
 +      OrcStripeMetadata value = metadataCache.getStripeMetadata(stripeKey);
 +      if (value == null || !value.hasAllIndexes(globalInc)) {
 +        counters.incrCounter(Counter.METADATA_CACHE_MISS);
 +        ensureMetadataReader();
 +        StripeInformation si = fileMetadata.getStripes().get(stripeKey.stripeIx);
 +        if (value == null) {
 +          long startTime = counters.startTimeCounter();
 +          value = new OrcStripeMetadata(stripeKey, metadataReader, si, globalInc, sargColumns);
 +          counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
 +          value = metadataCache.putStripeMetadata(value);
 +          if (DebugUtils.isTraceOrcEnabled()) {
 +            LlapIoImpl.LOG.info("Caching stripe " + stripeKey.stripeIx
 +                + " metadata with includes: " + DebugUtils.toString(globalInc));
 +          }
 +          // Create new key object to reuse for gets; we've used the old one to put in cache.
 +          stripeKey = new OrcBatchKey(fileId, 0, 0);
 +        }
 +        // We might have got an old value from cache; recheck it has indexes.
 +        if (!value.hasAllIndexes(globalInc)) {
 +          if (DebugUtils.isTraceOrcEnabled()) {
 +            LlapIoImpl.LOG.info("Updating indexes in stripe " + stripeKey.stripeIx
 +                + " metadata for includes: " + DebugUtils.toString(globalInc));
 +          }
 +          updateLoadedIndexes(value, si, globalInc, sargColumns);
 +        }
 +      } else {
 +        counters.incrCounter(Counter.METADATA_CACHE_HIT);
 +      }
 +      result.add(value);
 +      consumer.setStripeMetadata(value);
 +    }
 +    return result;
 +  }
 +
 +  private void ensureMetadataReader() throws IOException {
 +    ensureOrcReader();
 +    if (metadataReader != null) return;
 +    long startTime = counters.startTimeCounter();
 +    metadataReader = orcReader.metadata();
 +    counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
 +  }
 +
 +  @Override
 +  public void returnData(OrcEncodedColumnBatch ecb) {
 +    for (ColumnStreamData[] datas : ecb.getColumnData()) {
 +      if (datas == null) continue;
 +      for (ColumnStreamData data : datas) {
 +        if (data == null || data.decRef() != 0) continue;
 +        if (DebugUtils.isTraceLockingEnabled()) {
 +          for (MemoryBuffer buf : data.getCacheBuffers()) {
 +            LlapIoImpl.LOG.info("Unlocking " + buf + " at the end of processing");
 +          }
 +        }
 +        lowLevelCache.releaseBuffers(data.getCacheBuffers());
 +        CSD_POOL.offer(data);
 +      }
 +    }
 +    // We can offer ECB even with some streams not discarded; reset() will clear the arrays.
 +    ECB_POOL.offer(ecb);
 +  }
 +
 +  /**
 +   * Determines which RGs need to be read, after stripes have been determined.
 +   * SARG is applied, and readState is populated for each stripe accordingly.
 +   * @param stripes All stripes in the file (field state is used to determine stripes to read).
 +   */
 +  private boolean determineRgsToRead(boolean[] globalIncludes, int rowIndexStride,
 +      ArrayList<OrcStripeMetadata> metadata) throws IOException {
 +    SargApplier sargApp = null;
 +    if (sarg != null && rowIndexStride != 0) {
 +      List<OrcProto.Type> types = fileMetadata.getTypes();
 +      String[] colNamesForSarg = OrcInputFormat.getSargColumnNames(
 +          columnNames, types, globalIncludes, fileMetadata.isOriginalFormat());
 +      sargApp = new SargApplier(sarg, colNamesForSarg, rowIndexStride, types, globalIncludes.length);
 +    }
 +    boolean hasAnyData = false;
 +    // readState should have been initialized by this time with an empty array.
 +    for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
 +      int stripeIx = stripeIxMod + stripeIxFrom;
 +      StripeInformation stripe = fileMetadata.getStripes().get(stripeIx);
 +      int rgCount = getRgCount(stripe, rowIndexStride);
 +      boolean[] rgsToRead = null;
 +      if (sargApp != null) {
 +        OrcStripeMetadata stripeMetadata = metadata.get(stripeIxMod);
 +        rgsToRead = sargApp.pickRowGroups(stripe, stripeMetadata.getRowIndexes(),
 +            stripeMetadata.getBloomFilterIndexes(), true);
 +      }
 +      boolean isNone = rgsToRead == SargApplier.READ_NO_RGS,
 +          isAll = rgsToRead == SargApplier.READ_ALL_RGS;
 +      hasAnyData = hasAnyData || !isNone;
 +      if (DebugUtils.isTraceOrcEnabled()) {
 +        if (isNone) {
 +          LlapIoImpl.LOG.info("SARG eliminated all RGs for stripe " + stripeIx);
 +        } else if (!isAll) {
 +          LlapIoImpl.LOG.info("SARG picked RGs for stripe " + stripeIx + ": "
 +              + DebugUtils.toString(rgsToRead));
 +        } else {
 +          LlapIoImpl.LOG.info("Will read all " + rgCount + " RGs for stripe " + stripeIx);
 +        }
 +      }
 +      assert isAll || isNone || rgsToRead.length == rgCount;
 +      readState[stripeIxMod] = new boolean[columnIds.size()][];
 +      for (int j = 0; j < columnIds.size(); ++j) {
 +        readState[stripeIxMod][j] = (isAll || isNone) ? rgsToRead :
 +          Arrays.copyOf(rgsToRead, rgsToRead.length);
 +      }
 +
 +      adjustRgMetric(rgCount, rgsToRead, isNone, isAll);
 +    }
 +    return hasAnyData;
 +  }
 +
 +  private void adjustRgMetric(int rgCount, boolean[] rgsToRead, boolean isNone,
 +      boolean isAll) {
 +    int count = 0;
 +    if (!isAll) {
 +      for (boolean b : rgsToRead) {
 +        if (b)
 +          count++;
 +      }
 +    } else if (!isNone) {
 +      count = rgCount;
 +    }
 +    counters.setCounter(QueryFragmentCounters.Counter.SELECTED_ROWGROUPS, count);
 +  }
 +
 +
 +  private int getRgCount(StripeInformation stripe, int rowIndexStride) {
 +    return (int)Math.ceil((double)stripe.getNumberOfRows() / rowIndexStride);
 +  }
 +
 +  /**
 +   * Determine which stripes to read for a split. Populates stripeIxFrom and readState.
 +   */
 +  public void determineStripesToRead() {
 +    // The unit of caching for ORC is (rg x column) (see OrcBatchKey).
 +    List<StripeInformation> stripes = fileMetadata.getStripes();
 +    long offset = split.getStart(), maxOffset = offset + split.getLength();
 +    stripeIxFrom = -1;
 +    int stripeIxTo = -1;
 +    if (LlapIoImpl.LOGL.isDebugEnabled()) {
 +      String tmp = "FileSplit {" + split.getStart() + ", " + split.getLength() + "}; stripes ";
 +      for (StripeInformation stripe : stripes) {
 +        tmp += "{" + stripe.getOffset() + ", " + stripe.getLength() + "}, ";
 +      }
 +      LlapIoImpl.LOG.debug(tmp);
 +    }
 +
 +    int stripeIx = 0;
 +    for (StripeInformation stripe : stripes) {
 +      long stripeStart = stripe.getOffset();
 +      if (offset > stripeStart) {
 +        // We assume splits will never start in the middle of a stripe.
 +        ++stripeIx;
 +        continue;
 +      }
 +      if (stripeIxFrom == -1) {
 +        if (DebugUtils.isTraceOrcEnabled()) {
 +          LlapIoImpl.LOG.info("Including stripes from " + stripeIx
 +              + " (" + stripeStart + " >= " + offset + ")");
 +        }
 +        stripeIxFrom = stripeIx;
 +      }
 +      if (stripeStart >= maxOffset) {
 +        stripeIxTo = stripeIx;
 +        if (DebugUtils.isTraceOrcEnabled()) {
 +          LlapIoImpl.LOG.info("Including stripes until " + stripeIxTo + " (" + stripeStart
 +              + " >= " + maxOffset + "); " + (stripeIxTo - stripeIxFrom) + " stripes");
 +        }
 +        break;
 +      }
 +      ++stripeIx;
 +    }
 +    if (stripeIxTo == -1) {
 +      stripeIxTo = stripeIx;
 +      if (DebugUtils.isTraceOrcEnabled()) {
 +        LlapIoImpl.LOG.info("Including stripes until " + stripeIx + " (end of file); "
 +            + (stripeIxTo - stripeIxFrom) + " stripes");
 +      }
 +    }
 +    readState = new boolean[stripeIxTo - stripeIxFrom][][];
 +  }
 +
 +  // TODO: split by stripe? we do everything by stripe, and it might be faster
 +  /**
 +   * Takes the data from high-level cache for all stripes and returns to consumer.
 +   * @return List of columns to read per stripe, if any columns were fully eliminated by cache.
 +   */
 +  private List<Integer>[] produceDataFromCache(int rowIndexStride) throws IOException {
 +    OrcCacheKey key = new OrcCacheKey(fileId, -1, -1, -1);
 +    // For each stripe, keep a list of columns that are not fully in cache (null => all of them).
 +    @SuppressWarnings("unchecked")
 +    List<Integer>[] stripeColsNotInCache = new List[readState.length];
 +    for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
 +      key.stripeIx = stripeIxFrom + stripeIxMod;
 +      boolean[][] cols = readState[stripeIxMod];
 +      boolean[] isMissingAnyRgs = new boolean[cols.length];
 +      int totalRgCount = getRgCount(fileMetadata.getStripes().get(key.stripeIx), rowIndexStride);
 +      for (int rgIx = 0; rgIx < totalRgCount; ++rgIx) {
 +        OrcEncodedColumnBatch col = ECB_POOL.take();
 +        col.init(fileId, key.stripeIx, rgIx, cols.length);
 +        boolean hasAnyCached = false;
 +        try {
 +          key.rgIx = rgIx;
 +          for (int colIxMod = 0; colIxMod < cols.length; ++colIxMod) {
 +            boolean[] readMask = cols[colIxMod];
 +            // Check if RG is eliminated by SARG
 +            if ((readMask == SargApplier.READ_NO_RGS) || (readMask != SargApplier.READ_ALL_RGS
 +                && (readMask.length <= rgIx || !readMask[rgIx]))) continue;
 +            key.colIx = columnIds.get(colIxMod);
 +            ColumnStreamData[] cached = cache.get(key);
 +            if (cached == null) {
 +              isMissingAnyRgs[colIxMod] = true;
 +              continue;
 +            }
 +            assert cached.length == OrcEncodedColumnBatch.MAX_DATA_STREAMS;
 +            col.setAllStreamsData(colIxMod, key.colIx, cached);
 +            hasAnyCached = true;
 +            if (readMask == SargApplier.READ_ALL_RGS) {
 +              // We were going to read all RGs, but some were in cache, allocate the mask.
 +              cols[colIxMod] = readMask = new boolean[totalRgCount];
 +              Arrays.fill(readMask, true);
 +            }
 +            readMask[rgIx] = false; // Got from cache, don't read from disk.
 +          }
 +        } catch (Throwable t) {
 +          // TODO: Any cleanup needed to release data in col back to cache should be here.
 +          throw (t instanceof IOException) ? (IOException)t : new IOException(t);
 +        }
 +        if (hasAnyCached) {
 +          consumer.consumeData(col);
 +        }
 +      }
 +      boolean makeStripeColList = false; // By default assume we'll fetch all original columns.
 +      for (int colIxMod = 0; colIxMod < cols.length; ++colIxMod) {
 +        if (isMissingAnyRgs[colIxMod]) {
 +          if (makeStripeColList) {
 +            stripeColsNotInCache[stripeIxMod].add(columnIds.get(colIxMod));
 +          }
 +        } else if (!makeStripeColList) {
 +          // Some columns were fully in cache. Make a per-stripe col list, add previous columns.
 +          makeStripeColList = true;
 +          stripeColsNotInCache[stripeIxMod] = new ArrayList<Integer>(cols.length - 1);
 +          for (int i = 0; i < colIxMod; ++i) {
 +            stripeColsNotInCache[stripeIxMod].add(columnIds.get(i));
 +          }
 +        }
 +      }
 +    }
 +    return stripeColsNotInCache;
 +  }
 +
 +  @Override
 +  public void setDone() {
 +    consumer.setDone();
 +  }
 +
 +  @Override
 +  public void consumeData(OrcEncodedColumnBatch data) {
 +    // Store object in cache; create new key object - cannot be reused.
 +    assert cache != null;
 +    throw new UnsupportedOperationException("not implemented");
 +    /*for (int i = 0; i < data.getColumnData().length; ++i) {
 +      OrcCacheKey key = new OrcCacheKey(data.getBatchKey(), data.getColumnIxs()[i]);
 +      ColumnStreamData[] toCache = data.getColumnData()[i];
 +      ColumnStreamData[] cached = cache.cacheOrGet(key, toCache);
 +      if (toCache != cached) {
 +        for (ColumnStreamData sb : toCache) {
 +          if (sb.decRef() != 0) continue;
 +          lowLevelCache.releaseBuffers(sb.getCacheBuffers());
 +        }
 +        data.getColumnData()[i] = cached;
 +      }
 +    }
 +    consumer.consumeData(data);*/
 +  }
 +
 +  @Override
 +  public void setError(Throwable t) {
 +    consumer.setError(t);
 +  }
 +
 +  private class DataWrapperForOrc implements DataReader, DataCache {
 +    private DataReader orcDataReader;
 +
 +    public DataWrapperForOrc() {
 +      boolean useZeroCopy = (conf != null) && OrcConf.USE_ZEROCOPY.getBoolean(conf);
 +      if (useZeroCopy && !lowLevelCache.getAllocator().isDirectAlloc()) {
 +        throw new UnsupportedOperationException("Cannot use zero-copy reader with non-direct cache "
 +            + "buffers; either disable zero-copy or enable direct cache allocation");
 +      }
 +      this.orcDataReader = orcReader.createDefaultDataReader(useZeroCopy);
 +    }
 +
 +    @Override
 +    public DiskRangeList getFileData(long fileId, DiskRangeList range,
 +        long baseOffset, DiskRangeListFactory factory, BooleanRef gotAllData) {
 +      return lowLevelCache.getFileData(fileId, range, baseOffset, factory, counters, gotAllData);
 +    }
 +
 +    @Override
 +    public long[] putFileData(long fileId, DiskRange[] ranges,
 +        MemoryBuffer[] data, long baseOffset) {
 +      return lowLevelCache.putFileData(
 +          fileId, ranges, data, baseOffset, Priority.NORMAL, counters);
 +    }
 +
 +    @Override
 +    public void releaseBuffer(MemoryBuffer buffer) {
 +      lowLevelCache.releaseBuffer(buffer);
 +    }
 +
 +    @Override
 +    public void reuseBuffer(MemoryBuffer buffer) {
 +      boolean isReused = lowLevelCache.reuseBuffer(buffer);
 +      assert isReused;
 +    }
 +
 +    @Override
 +    public Allocator getAllocator() {
 +      return lowLevelCache.getAllocator();
 +    }
 +
 +    @Override
 +    public void close() throws IOException {
 +      orcDataReader.close();
 +    }
 +
 +    @Override
 +    public DiskRangeList readFileData(DiskRangeList range, long baseOffset,
 +        boolean doForceDirect) throws IOException {
 +      long startTime = counters.startTimeCounter();
 +      DiskRangeList result = orcDataReader.readFileData(range, baseOffset, doForceDirect);
 +      counters.recordHdfsTime(startTime);
 +      if (DebugUtils.isTraceOrcEnabled() && LOG.isInfoEnabled()) {
 +        LOG.info("Disk ranges after disk read (file " + fileId + ", base offset " + baseOffset
 +              + "): " + RecordReaderUtils.stringifyDiskRanges(result));
 +      }
 +      return result;
 +    }
 +
 +    @Override
 +    public boolean isTrackingDiskRanges() {
 +      return orcDataReader.isTrackingDiskRanges();
 +    }
 +
 +    @Override
 +    public void releaseBuffer(ByteBuffer buffer) {
 +      orcDataReader.releaseBuffer(buffer);
 +    }
 +
 +    @Override
 +    public void open() throws IOException {
 +      long startTime = counters.startTimeCounter();
 +      orcDataReader.open();
 +      counters.recordHdfsTime(startTime);
 +    }
 +  }
 +}
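
For readers following the row-group bookkeeping in determineRgsToRead/adjustRgMetric above, the arithmetic is small enough to sketch standalone. The class below is illustrative only - it is not part of this patch and the names are made up - but it mirrors getRgCount() and the SELECTED_ROWGROUPS accounting.

class RowGroupMathSketch {
  // ceil(numberOfRows / rowIndexStride), as in getRgCount() above.
  static int getRgCount(long numberOfRows, int rowIndexStride) {
    return (int) Math.ceil((double) numberOfRows / rowIndexStride);
  }

  // Mirrors adjustRgMetric(): how many RGs will actually be read for a stripe.
  static int countSelected(int rgCount, boolean[] rgsToRead, boolean isNone, boolean isAll) {
    if (isNone) return 0;       // SARG eliminated the whole stripe
    if (isAll) return rgCount;  // no elimination, read everything
    int count = 0;
    for (boolean b : rgsToRead) {
      if (b) count++;
    }
    return count;
  }

  public static void main(String[] args) {
    // A 1,000,000-row stripe with the default 10,000-row index stride has 100 RGs.
    int rgCount = getRgCount(1000000, 10000);
    boolean[] rgsToRead = new boolean[rgCount];
    rgsToRead[3] = rgsToRead[42] = true; // pretend the SARG kept only two RGs
    System.out.println(rgCount + " RGs, "
        + countSelected(rgCount, rgsToRead, false, false) + " selected");
  }
}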

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 5770bef,2500fb6..ffeaaa0
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@@ -56,9 -56,9 +56,10 @@@ import org.apache.hadoop.hive.ql.io.Aci
  import org.apache.hadoop.hive.ql.io.AcidUtils.Directory;
  import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
  import org.apache.hadoop.hive.ql.io.InputFormatChecker;
 +import org.apache.hadoop.hive.ql.io.LlapWrappableInputFormatInterface;
  import org.apache.hadoop.hive.ql.io.RecordIdentifier;
  import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+ import org.apache.hadoop.hive.ql.io.orc.OrcFile.WriterVersion;
  import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
  import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
  import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
@@@ -264,10 -265,9 +266,9 @@@ public class OrcInputFormat  implement
      boolean[] result = new boolean[numColumns];
      result[0] = true;
      OrcProto.Type root = types.get(rootColumn);
 -    for(int i=0; i < root.getSubtypesCount(); ++i) {
 +    for (int i = 0; i < root.getSubtypesCount(); ++i) {
        if (included.contains(i)) {
-         includeColumnRecursive(types, result, root.getSubtypes(i),
-             rootColumn);
+         includeColumnRecursive(types, result, root.getSubtypes(i), rootColumn);
        }
      }
      return result;
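
The hunk above only reflows the recursive call; for context, the shape of the include-mask computation can be sketched independently. TypeSketch below is a simplified stand-in for OrcProto.Type, not the real class, and the method names are illustrative.

import java.util.Arrays;
import java.util.List;

class TypeSketch {
  final List<Integer> subtypes; // child column ids in the flattened type list
  TypeSketch(Integer... children) { this.subtypes = Arrays.asList(children); }
}

class IncludedColumnsSketch {
  // Mark a column and its whole subtree as included (cf. includeColumnRecursive).
  static void includeRecursive(List<TypeSketch> types, boolean[] result, int typeId) {
    result[typeId] = true;
    for (int child : types.get(typeId).subtypes) {
      includeRecursive(types, result, child);
    }
  }

  // The root struct (column 0) is always read; top-level column i is included only if requested.
  static boolean[] genIncluded(List<TypeSketch> types, List<Integer> included) {
    boolean[] result = new boolean[types.size()];
    result[0] = true;
    TypeSketch root = types.get(0);
    for (int i = 0; i < root.subtypes.size(); ++i) {
      if (included.contains(i)) {
        includeRecursive(types, result, root.subtypes.get(i));
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // Root struct with two top-level columns: an int (type 1) and a list<int> (type 2 with child 3).
    List<TypeSketch> types = Arrays.asList(
        new TypeSketch(1, 2), new TypeSketch(), new TypeSketch(3), new TypeSketch());
    // Requesting only top-level column 1 includes type 2 and its child 3.
    System.out.println(Arrays.toString(genIncluded(types, Arrays.asList(1))));
    // prints: [true, false, true, true]
  }
}
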
@@@ -866,33 -901,11 +904,11 @@@
  
        // we can't eliminate stripes if there are deltas because the
        // deltas may change the rows making them match the predicate.
-       if (deltas.isEmpty()) {
-         Reader.Options options = new Reader.Options();
-         options.include(includedCols);
-         setSearchArgument(options, types, context.conf, isOriginal);
-         // only do split pruning if HIVE-8732 has been fixed in the writer
-         if (options.getSearchArgument() != null &&
-             writerVersion != OrcFile.WriterVersion.ORIGINAL) {
-           SearchArgument sarg = options.getSearchArgument();
-           List<PredicateLeaf> sargLeaves = sarg.getLeaves();
-           int[] filterColumns = RecordReaderImpl.mapSargColumns(sargLeaves,
-               options.getColumnNames(), getRootColumn(isOriginal));
- 
-           if (stripeStats != null) {
-             // eliminate stripes that doesn't satisfy the predicate condition
-             includeStripe = new boolean[stripes.size()];
-             for (int i = 0; i < stripes.size(); ++i) {
-               includeStripe[i] = (i >= stripeStats.size()) ||
-                   isStripeSatisfyPredicate(stripeStats.get(i), sarg,
-                       filterColumns);
-               if (isDebugEnabled && !includeStripe[i]) {
-                 LOG.debug("Eliminating ORC stripe-" + i + " of file '" +
-                     fileWithId.getFileStatus().getPath() + "'  as it did not satisfy " +
-                     "predicate condition.");
-               }
-             }
-           }
-         }
+       if (deltas.isEmpty() && canCreateSargFromConf(context.conf)) {
+         SearchArgument sarg = ConvertAstToSearchArg.createFromConf(context.conf);
+         String[] sargColNames = extractNeededColNames(types, context.conf, includedCols, isOriginal);
+         includeStripe = pickStripes(sarg, sargColNames, writerVersion, isOriginal,
 -            metadata.getStripeStatistics(), stripes.size(), file.getPath());
++            stripeStats, stripes.size(), file.getPath());
        }
  
        // if we didn't have predicate pushdown, read everything
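
The refactoring above moves the per-stripe elimination loop behind pickStripes(); the shape of that loop, visible in the removed lines, is roughly the following. This is a conceptual sketch rather than Hive code: the stripe-statistics objects and the satisfiesPredicate test are placeholders for the real stripe statistics and SARG evaluation.

import java.util.List;
import java.util.function.Predicate;

class StripeEliminationSketch {
  // A null result means "no statistics available, read every stripe".
  static boolean[] pickStripes(List<Object> stripeStats, int stripeCount,
      Predicate<Object> satisfiesPredicate) {
    if (stripeStats == null) {
      return null;
    }
    boolean[] includeStripe = new boolean[stripeCount];
    for (int i = 0; i < stripeCount; ++i) {
      // If there are no statistics for a stripe we must read it; otherwise
      // ask the predicate whether any row group in it could possibly match.
      includeStripe[i] = (i >= stripeStats.size())
          || satisfiesPredicate.test(stripeStats.get(i));
    }
    return includeStripe;
  }
}

Note that a SARG can only drop a stripe when the statistics prove nothing in it can match; missing statistics always force a read.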

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
index c974d00,52c00f9..563213a
--- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out
@@@ -143,20 -143,24 +143,32 @@@ STAGE PLANS
                          Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
              Execution mode: vectorized
          Reducer 2 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
-                 keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: string), _col0 (type: tinyint)
+                   outputColumnNames: _col0, _col1
                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
++<<<<<<< HEAD
 +                  table:
 +                      input format: org.apache.hadoop.mapred.TextInputFormat
 +                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 +                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
++=======
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+             Execution mode: vectorized
++>>>>>>> master
  
    Stage: Stage-0
      Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
index 9687ec1,2255f72..b2402db
--- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out
@@@ -153,13 -152,18 +153,25 @@@ STAGE PLANS
                  mode: mergepartial
                  outputColumnNames: _col0, _col1, _col2
                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
+                   outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
++<<<<<<< HEAD
 +                  table:
 +                      input format: org.apache.hadoop.mapred.TextInputFormat
 +                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
 +                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
++=======
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+             Execution mode: vectorized
++>>>>>>> master
  
    Stage: Stage-0
      Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
index a3a44df,bbc66fc..6308cee
--- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out
@@@ -65,22 -65,17 +65,18 @@@ STAGE PLANS
                    Filter Operator
                      predicate: l_partkey is not null (type: boolean)
                      Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: l_partkey (type: int)
+                     Group By Operator
+                       keys: l_partkey (type: int)
+                       mode: hash
                        outputColumnNames: _col0
                        Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: int)
-                         mode: hash
-                         outputColumnNames: _col0
+                       Reduce Output Operator
+                         key expressions: _col0 (type: int)
+                         sort order: +
+                         Map-reduce partition columns: _col0 (type: int)
                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: int)
-                           sort order: +
-                           Map-reduce partition columns: _col0 (type: int)
-                           Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
          Reducer 4 
 +            Execution mode: vectorized
              Local Work:
                Map Reduce Local Work
              Reduce Operator Tree:
@@@ -270,22 -266,17 +266,18 @@@ STAGE PLANS
                    Filter Operator
                      predicate: l_partkey is not null (type: boolean)
                      Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: l_partkey (type: int)
+                     Group By Operator
+                       keys: l_partkey (type: int)
+                       mode: hash
                        outputColumnNames: _col0
                        Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: int)
-                         mode: hash
-                         outputColumnNames: _col0
+                       Reduce Output Operator
+                         key expressions: _col0 (type: int)
+                         sort order: +
+                         Map-reduce partition columns: _col0 (type: int)
                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: int)
-                           sort order: +
-                           Map-reduce partition columns: _col0 (type: int)
-                           Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
          Reducer 4 
 +            Execution mode: vectorized
              Local Work:
                Map Reduce Local Work
              Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vector_orderby_5.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
index 7308fb2,316ed63..bf23ae4
--- a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out
@@@ -97,24 -97,19 +97,27 @@@ STAGE PLANS
                              input vertices:
                                1 Map 4
                              Statistics: Num rows: 7433 Data size: 228226 Basic stats: COMPLETE Column stats: NONE
-                             Select Operator
-                               expressions: _col1 (type: double)
+                             Group By Operator
+                               aggregations: sum(_col1)
+                               mode: hash
                                outputColumnNames: _col0
-                               Statistics: Num rows: 7433 Data size: 228226 Basic stats: COMPLETE Column stats: NONE
-                               Group By Operator
-                                 aggregations: sum(_col0)
-                                 mode: hash
-                                 outputColumnNames: _col0
+                               Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                               Reduce Output Operator
+                                 sort order: 
                                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
++<<<<<<< HEAD
 +                                Reduce Output Operator
 +                                  sort order: 
 +                                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
 +                                  value expressions: _col0 (type: double)
 +            Execution mode: vectorized
++=======
+                                 value expressions: _col0 (type: double)
++>>>>>>> master
              Local Work:
                Map Reduce Local Work
 -            Execution mode: vectorized
          Reducer 3 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
                  aggregations: sum(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 2e87e2c,63e6ade..8e4a501
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@@ -1591,37 -1591,37 +1591,37 @@@ STAGE PLANS
                    Filter Operator
                      predicate: (t is null or (t = 27)) (type: boolean)
                      Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                     Group By Operator
+                       keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                       mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
                        Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                         mode: hash
-                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                       Reduce Output Operator
+                         key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                         sort order: +++++
+                         Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
                          Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                           sort order: +++++
-                           Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                           Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
              Execution mode: vectorized
          Reducer 2 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
-                 keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                    Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                   table:
-                       input format: org.apache.hadoop.mapred.TextInputFormat
-                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                       name: default.over1k_part2_orc
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                         name: default.over1k_part2_orc
 -            Execution mode: vectorized
  
    Stage: Stage-2
      Dependency Collection
@@@ -1670,50 -1669,37 +1669,37 @@@ STAGE PLANS
                    Filter Operator
                      predicate: (t is null or (t = 27)) (type: boolean)
                      Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                     Select Operator
-                       expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                     Group By Operator
+                       keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                       mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
                        Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                       Group By Operator
-                         keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                         mode: hash
-                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                       Reduce Output Operator
+                         key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                         sort order: +++++
+                         Map-reduce partition columns: _col0 (type: tinyint)
                          Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                         Reduce Output Operator
-                           key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                           sort order: +++++
-                           Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                           Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
              Execution mode: vectorized
          Reducer 2 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
-                 keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                 Reduce Output Operator
-                   key expressions: _col4 (type: tinyint)
-                   sort order: +
-                   Map-reduce partition columns: _col4 (type: tinyint)
-                   Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                   value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-         Reducer 3 
-             Execution mode: vectorized
-             Reduce Operator Tree:
-               Select Operator
-                 expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
-                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
                    Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                   table:
-                       input format: org.apache.hadoop.mapred.TextInputFormat
-                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                       name: default.over1k_part2_orc
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                         name: default.over1k_part2_orc
 -            Execution mode: vectorized
  
    Stage: Stage-2
      Dependency Collection

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/mrr.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/f324305a/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
index a1063ab,6c31294..44d207b
--- a/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
@@@ -143,20 -143,24 +143,24 @@@ STAGE PLANS
                          Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
              Execution mode: vectorized
          Reducer 2 
 +            Execution mode: vectorized
              Reduce Operator Tree:
                Group By Operator
-                 keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                 keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                 File Output Operator
-                   compressed: false
+                 Select Operator
+                   expressions: _col1 (type: string), _col0 (type: tinyint)
+                   outputColumnNames: _col0, _col1
                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                   table:
-                       input format: org.apache.hadoop.mapred.TextInputFormat
-                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                   File Output Operator
+                     compressed: false
+                     Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                     table:
+                         input format: org.apache.hadoop.mapred.TextInputFormat
+                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 -            Execution mode: vectorized
  
    Stage: Stage-0
      Fetch Operator


[18/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
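
The plan diffs below show the effect of the new rule: Select Operators that only renamed
columns ahead of an aggregation are folded into the consuming Group By, so keys and
aggregations now reference the scanned columns directly (for example "keys: key (type: string)"
rather than "keys: _col0 (type: string)"). A minimal sketch of the kind of query whose plan
changes this way, assuming the standard src(key, value) test table used by these .q tests:

    -- illustrative only; the before/after plan shape appears in the diffs below
    EXPLAIN
    SELECT key, count(*)
    FROM src
    WHERE key > '9'
    GROUP BY key;
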
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_in_having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_in_having.q.out b/ql/src/test/results/clientpositive/subquery_in_having.q.out
index da1da06..deab017 100644
--- a/ql/src/test/results/clientpositive/subquery_in_having.q.out
+++ b/ql/src/test/results/clientpositive/subquery_in_having.q.out
@@ -73,11 +73,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -146,22 +146,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (key > '9') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -287,22 +283,18 @@ STAGE PLANS:
             Filter Operator
               predicate: value is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -362,22 +354,18 @@ STAGE PLANS:
             Filter Operator
               predicate: ((key > '9') and value is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: value (type: string), key (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -386,14 +374,14 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col2
+            expressions: _col1 (type: string), _col2 (type: bigint)
+            outputColumnNames: _col1, _col2
             Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: _col2 is not null (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col2 (type: bigint), _col0 (type: string)
+                expressions: _col2 (type: bigint), _col1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -454,22 +442,18 @@ STAGE PLANS:
             Filter Operator
               predicate: p_mfgr is not null (type: boolean)
               Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_mfgr (type: string), p_size (type: int)
+              Group By Operator
+                aggregations: avg(p_size)
+                keys: p_mfgr (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: avg(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>)
+                  value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: avg(VALUE._col0)
@@ -526,22 +510,18 @@ STAGE PLANS:
             Filter Operator
               predicate: p_mfgr is not null (type: boolean)
               Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_mfgr (type: string), p_size (type: int)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: max(p_size), min(p_size)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1), min(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: int), _col2 (type: int)
+                  value expressions: _col1 (type: int), _col2 (type: int)
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0), min(VALUE._col1)
@@ -612,44 +592,36 @@ STAGE PLANS:
             Filter Operator
               predicate: p_mfgr is not null (type: boolean)
               Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_mfgr (type: string), p_size (type: int)
+              Group By Operator
+                aggregations: avg(p_size)
+                keys: p_mfgr (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: avg(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>)
+                  value expressions: _col1 (type: struct<count:bigint,sum:double,input:int>)
           TableScan
             alias: b
             Statistics: Num rows: 30 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: p_mfgr is not null (type: boolean)
               Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_mfgr (type: string), p_size (type: int)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: max(p_size), min(p_size)
+                keys: p_mfgr (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1), min(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 15 Data size: 1586 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: int), _col2 (type: int)
+                  value expressions: _col1 (type: int), _col2 (type: int)
       Reduce Operator Tree:
         Demux Operator
           Statistics: Num rows: 30 Data size: 3172 Basic stats: COMPLETE Column stats: NONE
@@ -905,22 +877,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (key > '9') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -1005,22 +973,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (key > '9') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -1244,22 +1208,18 @@ STAGE PLANS:
             Filter Operator
               predicate: p_name is not null (type: boolean)
               Statistics: Num rows: 8 Data size: 1692 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: p_mfgr (type: string), p_name (type: string), p_size (type: int)
+              Group By Operator
+                aggregations: avg(p_size)
+                keys: p_name (type: string), p_mfgr (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 8 Data size: 1692 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: avg(_col2)
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 8 Data size: 1692 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 8 Data size: 1692 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: struct<count:bigint,sum:double,input:int>)
+                  value expressions: _col2 (type: struct<count:bigint,sum:double,input:int>)
       Reduce Operator Tree:
         Group By Operator
           aggregations: avg(VALUE._col0)
@@ -1267,12 +1227,16 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+          Select Operator
+            expressions: _col1 (type: string), _col0 (type: string), _col2 (type: double)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 4 Data size: 846 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-5
     Conditional Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_notexists.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notexists.q.out b/ql/src/test/results/clientpositive/subquery_notexists.q.out
index 81b4137..215d855 100644
--- a/ql/src/test/results/clientpositive/subquery_notexists.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notexists.q.out
@@ -257,20 +257,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (value > 'val_2') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_notexists_having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notexists_having.q.out b/ql/src/test/results/clientpositive/subquery_notexists_having.q.out
index fd09901..637fc62 100644
--- a/ql/src/test/results/clientpositive/subquery_notexists_having.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notexists_having.q.out
@@ -34,10 +34,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -186,10 +186,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -260,20 +260,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (value > 'val_12') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out
index fd6d53b..5563794 100644
--- a/ql/src/test/results/clientpositive/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/subquery_notin.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr
 explain
 select * 
@@ -151,7 +151,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * 
 from src 
 where src.key not in  ( select key from src s1 where s1.key > '2')
@@ -285,7 +285,7 @@ POSTHOOK: Input: default@src
 199	val_199
 199	val_199
 2	val_2
-Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size 
@@ -528,7 +528,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select p_mfgr, b.p_name, p_size 
 from part b 
 where b.p_name not in 
@@ -567,7 +567,7 @@ Manufacturer#4	almond azure aquamarine papaya violet	12
 Manufacturer#5	almond antique blue firebrick mint	31
 Manufacturer#5	almond aquamarine dodger light gainsboro	46
 Manufacturer#5	almond azure blanched chiffon midnight	23
-Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- agg, non corr
 explain
 select p_name, p_size 
@@ -843,7 +843,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select p_name, p_size 
 from 
 part where part.p_size not in 
@@ -890,7 +890,7 @@ almond aquamarine sandy cyan gainsboro	18
 almond aquamarine yellow dodger mint	7
 almond azure aquamarine papaya violet	12
 almond azure blanched chiffon midnight	23
-Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- agg, corr
 explain
 select p_mfgr, p_name, p_size 
@@ -1202,7 +1202,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select p_mfgr, p_name, p_size 
 from part b where b.p_size not in 
   (select min(p_size) 
@@ -1243,7 +1243,7 @@ Manufacturer#5	almond antique medium spring khaki	6
 Manufacturer#5	almond azure blanched chiffon midnight	23
 Manufacturer#5	almond antique blue firebrick mint	31
 Manufacturer#5	almond aquamarine dodger light gainsboro	46
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr, Group By in Parent Query
 select li.l_partkey, count(*) 
 from lineitem li 
@@ -1278,7 +1278,7 @@ POSTHOOK: Input: default@lineitem
 139636	1
 175839	1
 182052	1
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- alternate not in syntax
 select * 
 from src 
@@ -1442,7 +1442,7 @@ POSTHOOK: Input: default@src
 POSTHOOK: Input: default@t1_v
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2_v
-Warning: Shuffle Join JOIN[25][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
@@ -1587,7 +1587,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[25][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
index 775f477..9689ae3 100644
--- a/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.java1.7.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[26][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- non agg, non corr
 -- JAVA_VERSION_SPECIFIC_OUTPUT
 
@@ -39,11 +39,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -188,7 +188,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select b.p_mfgr, min(p_retailprice) 
@@ -229,11 +229,11 @@ STAGE PLANS:
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: p_mfgr, p_retailprice
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col1)
-                keys: _col0 (type: string)
+                aggregations: min(p_retailprice)
+                keys: p_mfgr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
@@ -332,11 +332,11 @@ STAGE PLANS:
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: p_mfgr, p_retailprice
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col1), max(_col1)
-                keys: _col0 (type: string)
+                aggregations: min(p_retailprice), max(p_retailprice)
+                keys: p_mfgr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
@@ -404,11 +404,11 @@ STAGE PLANS:
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: p_mfgr, p_retailprice
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col1), max(_col1)
-                keys: _col0 (type: string)
+                aggregations: min(p_retailprice), max(p_retailprice)
+                keys: p_mfgr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
@@ -445,7 +445,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
 from part b 
 group by b.p_mfgr
@@ -470,7 +470,7 @@ POSTHOOK: Input: default@part
 #### A masked pattern was here ####
 Manufacturer#1	1173.15
 Manufacturer#2	1690.68
-Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[35][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- agg, non corr
 explain
 select b.p_mfgr, min(p_retailprice) 
@@ -513,11 +513,11 @@ STAGE PLANS:
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: p_mfgr, p_retailprice
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col1)
-                keys: _col0 (type: string)
+                aggregations: min(p_retailprice)
+                keys: p_mfgr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
@@ -613,11 +613,11 @@ STAGE PLANS:
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: p_mfgr (type: string), p_retailprice (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: p_mfgr, p_retailprice
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1), min(_col1)
-                keys: _col0 (type: string)
+                aggregations: max(p_retailprice), min(p_retailprice)
+                keys: p_mfgr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
@@ -659,11 +659,11 @@ STAGE PLANS:
               Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: null (type: string), p_retailprice (type: double)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: p_mfgr, p_retailprice
                 Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col1), min(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: max(p_retailprice), min(p_retailprice)
+                  keys: p_mfgr (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
@@ -733,7 +733,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[39][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[35][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: select b.p_mfgr, min(p_retailprice) 
 from part b 
 group by b.p_mfgr

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
index 76d7503..908ad39 100644
--- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
+++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
@@ -549,20 +549,16 @@ STAGE PLANS:
             Filter Operator
               predicate: ((key > '9') and value is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -658,22 +654,18 @@ STAGE PLANS:
             Filter Operator
               predicate: value is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -733,22 +725,18 @@ STAGE PLANS:
             Filter Operator
               predicate: ((key > '9') and value is not null) (type: boolean)
               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: value (type: string), key (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -757,14 +745,14 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
           Select Operator
-            expressions: _col0 (type: string), _col2 (type: bigint)
-            outputColumnNames: _col0, _col2
+            expressions: _col1 (type: string), _col2 (type: bigint)
+            outputColumnNames: _col1, _col2
             Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: _col2 is not null (type: boolean)
               Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                expressions: _col2 (type: bigint), _col0 (type: string)
+                expressions: _col2 (type: bigint), _col1 (type: string)
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
@@ -785,7 +773,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[31][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[30][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size 

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out
index c59d86e..cfa7339 100644
--- a/ql/src/test/results/clientpositive/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/subquery_views.q.out
@@ -69,8 +69,8 @@ POSTHOOK: type: CREATEVIEW
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@cv2
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[52][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
 PREHOOK: query: explain
 select * 
 from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
@@ -378,8 +378,8 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
-Warning: Shuffle Join JOIN[52][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
 PREHOOK: query: select * 
 from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/count.q.out b/ql/src/test/results/clientpositive/tez/count.q.out
index a5e6082..b474f05 100644
--- a/ql/src/test/results/clientpositive/tez/count.q.out
+++ b/ql/src/test/results/clientpositive/tez/count.q.out
@@ -53,11 +53,11 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col2), sum(_col3)
-                      keys: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                      aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
+                      keys: a (type: int), b (type: int), c (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
@@ -188,14 +188,14 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                      key expressions: a (type: int), b (type: int), c (type: int)
                       sort order: +++
-                      Map-reduce partition columns: _col0 (type: int)
+                      Map-reduce partition columns: a (type: int)
                       Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col3 (type: int)
+                      value expressions: d (type: int)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
index cd2de52..e11f3e5 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
@@ -59,10 +59,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -1773,7 +1773,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1812,10 +1812,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1875,7 +1875,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -2739,10 +2739,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2757,10 +2757,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2936,10 +2936,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2954,10 +2954,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -3121,20 +3121,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: ds (type: string)
+                  Group By Operator
+                    keys: ds (type: string)
+                    mode: hash
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 10 
             Map Operator Tree:
                 TableScan
@@ -3142,10 +3138,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -3159,20 +3155,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: ds (type: string)
+                  Group By Operator
+                    keys: ds (type: string)
+                    mode: hash
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -3180,10 +3172,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -4133,7 +4125,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4188,10 +4180,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -4233,7 +4225,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4824,10 +4816,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -4842,10 +4834,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
index 4451046..63e6ade 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out
@@ -1591,36 +1591,36 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (t is null or (t = 27)) (type: boolean)
                     Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                    Group By Operator
+                      keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                      mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                        sort order: +++++
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
                         Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          sort order: +++++
-                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.over1k_part2_orc
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.over1k_part2_orc
             Execution mode: vectorized
 
   Stage: Stage-2
@@ -1659,7 +1659,6 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1670,49 +1669,36 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (t is null or (t = 27)) (type: boolean)
                     Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                    Group By Operator
+                      keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                      mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                        sort order: +++++
+                        Map-reduce partition columns: _col0 (type: tinyint)
                         Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          sort order: +++++
-                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col4 (type: tinyint)
-                  sort order: +
-                  Map-reduce partition columns: _col4 (type: tinyint)
+                Select Operator
+                  expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-            Execution mode: vectorized
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.over1k_part2_orc
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.over1k_part2_orc
             Execution mode: vectorized
 
   Stage: Stage-2

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
index cb001b9..b76b4f9 100644
--- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out
@@ -1496,35 +1496,35 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (t is null or (t = 27)) (type: boolean)
                     Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                    Group By Operator
+                      keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                      mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                        sort order: +++++
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          sort order: +++++
-                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.over1k_part2
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.over1k_part2
 
   Stage: Stage-2
     Dependency Collection
@@ -1562,7 +1562,6 @@ STAGE PLANS:
     Tez
       Edges:
         Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -1573,47 +1572,35 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (t is null or (t = 27)) (type: boolean)
                     Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+                    Group By Operator
+                      keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                      mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Reduce Output Operator
+                        key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                        sort order: +++++
+                        Map-reduce partition columns: _col0 (type: tinyint)
                         Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          sort order: +++++
-                          Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                          Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col4 (type: tinyint)
-                  sort order: +
-                  Map-reduce partition columns: _col4 (type: tinyint)
-                  Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-        Reducer 3 
-            Reduce Operator Tree:
-              Select Operator
-                expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
-                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
                   Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.over1k_part2
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.over1k_part2
 
   Stage: Stage-2
     Dependency Collection


[13/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
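Context for the plan changes in this patch: HIVE-11678 adds an aggregate/project merge rule to Hive's Calcite-based optimizer, which collapses an Aggregate sitting on top of a Project of plain column references. That is why the Group By Operators in these golden-file diffs now key and aggregate on the underlying columns (ds, key, ctinyint, ...) instead of the _col0 aliases previously produced by a Select Operator. The sketch below is illustrative only and assumes upstream Calcite's AggregateProjectMergeRule applied through a HepPlanner; it is not copied from Hive's own planner wiring, and the class/field names outside Calcite are hypothetical.

    // Hypothetical sketch: apply Calcite's AggregateProjectMergeRule so that an
    // Aggregate reading from a Project of simple column references is rewritten to
    // aggregate the base columns directly -- the effect visible in the .q.out diffs,
    // where Group By Operators reference ds/key/ctinyint rather than _col0 aliases.
    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgram;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.rules.AggregateProjectMergeRule;

    public final class AggregateProjectMergeExample {

      private AggregateProjectMergeExample() {
      }

      /** Returns the plan with Aggregate-over-Project collapsed where possible. */
      public static RelNode mergeAggregateOverProject(RelNode root) {
        // Build a one-rule HEP program and let the planner rewrite the tree.
        HepProgram program = new HepProgramBuilder()
            .addRuleInstance(AggregateProjectMergeRule.INSTANCE)
            .build();
        HepPlanner planner = new HepPlanner(program);
        planner.setRoot(root);
        return planner.findBestExp();
      }
    }

Once the rename-only Select Operator is merged away, the map-side plans shrink by one operator, which is also consistent with the renumbered cross-product warnings in these diffs (MERGEJOIN[23]/MAPJOIN[23] becoming [21]).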
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out b/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out
index 8118139..966f131 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out
@@ -135,11 +135,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double)
-                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
+                    outputColumnNames: ctinyint, cint, csmallint, cstring1, cfloat, cdouble
                     Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1), min(_col2), count(_col3), avg(_col4), stddev_pop(_col5)
-                      keys: _col0 (type: tinyint)
+                      aggregations: max(cint), min(csmallint), count(cstring1), avg(cfloat), stddev_pop(cdouble)
+                      keys: ctinyint (type: tinyint)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
index b285a4f..cb47605 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out
@@ -644,10 +644,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0), max(_col0), count(_col0), count()
+                      aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count()
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE
@@ -725,10 +725,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(ctimestamp1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -815,10 +815,10 @@ STAGE PLANS:
                   Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctimestamp1 (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctimestamp1
                     Statistics: Num rows: 40 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: avg(_col0), variance(_col0), var_pop(_col0), var_samp(_col0), std(_col0), stddev(_col0), stddev_pop(_col0), stddev_samp(_col0)
+                      aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/udf8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf8.q.out b/ql/src/test/results/clientpositive/udf8.q.out
index 8276ba9..1f43e23 100644
--- a/ql/src/test/results/clientpositive/udf8.q.out
+++ b/ql/src/test/results/clientpositive/udf8.q.out
@@ -43,10 +43,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: c1 (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: c1
               Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col0), sum(_col0), count(_col0)
+                aggregations: avg(c1), sum(c1), count(c1)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/udf_count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_count.q.out b/ql/src/test/results/clientpositive/udf_count.q.out
index c9a596f..430893d 100644
--- a/ql/src/test/results/clientpositive/udf_count.q.out
+++ b/ql/src/test/results/clientpositive/udf_count.q.out
@@ -29,10 +29,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -86,11 +86,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
-                keys: _col0 (type: string)
+                aggregations: count(DISTINCT key)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -144,11 +144,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0, _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT key, value)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union11.q.out b/ql/src/test/results/clientpositive/union11.q.out
index 0d0b8fa..55d593d 100644
--- a/ql/src/test/results/clientpositive/union11.q.out
+++ b/ql/src/test/results/clientpositive/union11.q.out
@@ -31,28 +31,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -65,48 +67,48 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Union
-              Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
-              Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
-              Statistics: Num rows: 3 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -114,10 +116,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -128,28 +130,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst2' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -162,28 +166,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst3' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union14.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union14.q.out b/ql/src/test/results/clientpositive/union14.q.out
index 7a7e938..eabdaa8 100644
--- a/ql/src/test/results/clientpositive/union14.q.out
+++ b/ql/src/test/results/clientpositive/union14.q.out
@@ -27,28 +27,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -67,33 +69,33 @@ STAGE PLANS:
               outputColumnNames: _col0
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Union
-                Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint)
           TableScan
             Union
-              Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -101,10 +103,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union15.q.out b/ql/src/test/results/clientpositive/union15.q.out
index 148f25f..be7f966 100644
--- a/ql/src/test/results/clientpositive/union15.q.out
+++ b/ql/src/test/results/clientpositive/union15.q.out
@@ -29,28 +29,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -63,18 +65,18 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Union
-              Statistics: Num rows: 51 Data size: 470 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -84,18 +86,18 @@ STAGE PLANS:
               outputColumnNames: _col0
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Union
-                Statistics: Num rows: 51 Data size: 470 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -105,18 +107,18 @@ STAGE PLANS:
               outputColumnNames: _col0
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Union
-                Statistics: Num rows: 51 Data size: 470 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 51 Data size: 390 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -124,10 +126,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union28.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union28.q.out b/ql/src/test/results/clientpositive/union28.q.out
index 38f8ee6..c3789d0 100644
--- a/ql/src/test/results/clientpositive/union28.q.out
+++ b/ql/src/test/results/clientpositive/union28.q.out
@@ -53,10 +53,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -193,10 +193,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union30.q.out b/ql/src/test/results/clientpositive/union30.q.out
index 894ed49..26a27c8 100644
--- a/ql/src/test/results/clientpositive/union30.q.out
+++ b/ql/src/test/results/clientpositive/union30.q.out
@@ -67,10 +67,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -228,10 +228,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union33.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union33.q.out b/ql/src/test/results/clientpositive/union33.q.out
index 308cd8b..17e0844 100644
--- a/ql/src/test/results/clientpositive/union33.q.out
+++ b/ql/src/test/results/clientpositive/union33.q.out
@@ -53,11 +53,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -268,11 +268,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union5.q.out b/ql/src/test/results/clientpositive/union5.q.out
index 75389f8..f57c60a 100644
--- a/ql/src/test/results/clientpositive/union5.q.out
+++ b/ql/src/test/results/clientpositive/union5.q.out
@@ -26,28 +26,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -60,33 +62,33 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Union
-              Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
-              Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -94,10 +96,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -108,28 +110,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst2' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union7.q.out b/ql/src/test/results/clientpositive/union7.q.out
index 592f073..162fe36 100644
--- a/ql/src/test/results/clientpositive/union7.q.out
+++ b/ql/src/test/results/clientpositive/union7.q.out
@@ -25,28 +25,30 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: s1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              expressions: key (type: string)
+              outputColumnNames: key
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(1)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: 'tst1' (type: string)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -59,18 +61,18 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             Union
-              Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -80,18 +82,18 @@ STAGE PLANS:
               outputColumnNames: _col0
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Union
-                Statistics: Num rows: 26 Data size: 279 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   keys: _col0 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -99,10 +101,10 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/unionDistinct_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/unionDistinct_1.q.out
index 8d74fbe..81c46da 100644
--- a/ql/src/test/results/clientpositive/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/unionDistinct_1.q.out
@@ -15008,11 +15008,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -15172,11 +15172,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/union_remove_21.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out
index 4743d8d..c956940 100644
--- a/ql/src/test/results/clientpositive/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_21.q.out
@@ -76,10 +76,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
@@ -121,10 +121,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
index f81816c..72dc004 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out
@@ -120,10 +120,10 @@ STAGE PLANS:
             Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: dc (type: decimal(38,18))
-              outputColumnNames: _col0
+              outputColumnNames: dc
               Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col0), max(_col0), sum(_col0), avg(_col0)
+                aggregations: min(dc), max(dc), sum(dc), avg(dc)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out b/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
index 1175cb8..cd4e4cc 100644
--- a/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
+++ b/ql/src/test/results/clientpositive/vector_aggregate_without_gby.q.out
@@ -53,10 +53,10 @@ STAGE PLANS:
               Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: dt (type: int), greg_dt (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: dt, greg_dt
                 Statistics: Num rows: 3 Data size: 306 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: max(_col0), max(_col1)
+                  aggregations: max(dt), max(greg_dt)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
index c3e4d52..7da8ae0 100644
--- a/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/vector_binary_join_groupby.q.out
@@ -213,11 +213,11 @@ STAGE PLANS:
             Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: bin (type: binary)
-              outputColumnNames: _col0
+              outputColumnNames: bin
               Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: binary)
+                keys: bin (type: binary)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/vector_count_distinct.q.out
index b87b2c7..0d491b3 100644
--- a/ql/src/test/results/clientpositive/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/vector_count_distinct.q.out
@@ -1252,11 +1252,11 @@ STAGE PLANS:
             Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ws_order_number (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: ws_order_number
               Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
-                keys: _col0 (type: int)
+                aggregations: count(DISTINCT ws_order_number)
+                keys: ws_order_number (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index 02c1c83..cf975d1 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -49,11 +49,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: cint, cdecimal1, cdecimal2
               Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), count()
-                keys: _col0 (type: int)
+                aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
+                keys: cint (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                 Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
@@ -147,11 +147,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: cint, cdecimal1, cdecimal2
               Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), avg(_col1), stddev_pop(_col1), stddev_samp(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), avg(_col2), stddev_pop(_col2), stddev_samp(_col2), count()
-                keys: _col0 (type: int)
+                aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
+                keys: cint (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
                 Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
index 16f2e3f..f2aaf8d 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out
@@ -562,10 +562,10 @@ STAGE PLANS:
             Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: dec (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: dec
               Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col0), sum(_col0)
+                aggregations: avg(dec), sum(dec)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
index 91a585c..cfd2a55 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
@@ -1574,11 +1574,11 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col1), count(_col1), avg(_col1)
-                keys: _col0 (type: int)
+                aggregations: sum(key), count(key), avg(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2239,11 +2239,11 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: stddev(_col1), variance(_col1)
-                keys: _col0 (type: int)
+                aggregations: stddev(key), variance(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2319,11 +2319,11 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: stddev_samp(_col1), var_samp(_col1)
-                keys: _col0 (type: int)
+                aggregations: stddev_samp(key), var_samp(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2458,10 +2458,10 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col0)
+                aggregations: min(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2518,10 +2518,10 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2578,10 +2578,10 @@ STAGE PLANS:
             Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
index 4153155..6b552be 100644
--- a/ql/src/test/results/clientpositive/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
@@ -123,33 +123,37 @@ STAGE PLANS:
             alias: vectortab2korc
             Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: s (type: string), t (type: tinyint)
-              outputColumnNames: _col0, _col1
+              expressions: t (type: tinyint), s (type: string)
+              outputColumnNames: t, s
               Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: tinyint)
+                keys: t (type: tinyint), s (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: tinyint)
+                  key expressions: _col0 (type: tinyint), _col1 (type: string)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                  Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: string), _col0 (type: tinyint)
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
index af7cb58..ffe42a7 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
@@ -123,36 +123,40 @@ STAGE PLANS:
             alias: vectortab2korc
             Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: s (type: string), t (type: tinyint), b (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
+              expressions: t (type: tinyint), s (type: string), b (type: bigint)
+              outputColumnNames: t, s, b
               Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col2)
-                keys: _col0 (type: string), _col1 (type: tinyint)
+                aggregations: max(b)
+                keys: t (type: tinyint), s (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: tinyint)
+                  key expressions: _col0 (type: tinyint), _col1 (type: string)
                   sort order: ++
-                  Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                  Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col2 (type: bigint)
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2
           Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
+            outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
index edcb0b3..331ba4f 100644
--- a/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_groupby_reduce.q.out
@@ -240,10 +240,10 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ss_ticket_number (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: ss_ticket_number
               Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: ss_ticket_number (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
@@ -357,10 +357,10 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ss_ticket_number (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: ss_ticket_number
               Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: ss_ticket_number (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
index 69d6187..32e95c0 100644
--- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
@@ -121,10 +121,10 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: s_store_id (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: s_store_id
               Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), '0' (type: string)
+                keys: s_store_id (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
@@ -196,10 +196,10 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: s_store_id (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: s_store_id
               Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), '0' (type: string)
+                keys: s_store_id (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
index 2f986c2..2f8efd5 100644
--- a/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
+++ b/ql/src/test/results/clientpositive/vector_left_outer_join.q.out
@@ -25,14 +25,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -44,7 +44,7 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
index fdd7ea8..ee74fbe 100644
--- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
@@ -44,20 +44,16 @@ STAGE PLANS:
             Filter Operator
               predicate: l_partkey is not null (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: l_partkey (type: int)
+              Group By Operator
+                keys: l_partkey (type: int)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)
@@ -315,20 +311,16 @@ STAGE PLANS:
             Filter Operator
               predicate: l_partkey is not null (type: boolean)
               Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: l_partkey (type: int)
+              Group By Operator
+                keys: l_partkey (type: int)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
index 45ccc62..a3be243 100644
--- a/ql/src/test/results/clientpositive/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out
@@ -121,11 +121,11 @@ STAGE PLANS:
             Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: bo (type: boolean), b (type: bigint)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: bo, b
               Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1)
-                keys: _col0 (type: boolean)
+                aggregations: max(b)
+                keys: bo (type: boolean)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index 8e8f7fa..fd86093 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -489,14 +489,14 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:c 
+        $hdt$_1:c 
           TableScan
             alias: c
             Statistics: Num rows: 15 Data size: 3320 Basic stats: COMPLETE Column stats: NONE
@@ -508,7 +508,7 @@ STAGE PLANS:
                 keys:
                   0 _col1 (type: int)
                   1 _col0 (type: int)
-        $hdt$_0:$hdt$_2:c 
+        $hdt$_2:c 
           TableScan
             alias: c
             Statistics: Num rows: 15 Data size: 3320 Basic stats: COMPLETE Column stats: NONE


[15/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
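For context on the diffs in this message: AggregateProjectMergeRule folds a projection that only renames or reorders columns into the aggregate above it, so the Group By Operator reads the underlying table columns (key, value, ds, ...) directly and the intermediate Select Operator with _colN aliases drops out of the plan — the recurring pattern in the .q.out changes below. The following is a minimal, self-contained Java sketch of that idea only; the Project/Aggregate classes and the merge() helper are invented for illustration and are not the actual Hive or Calcite implementation.

  // Illustrative sketch, not the real AggregateProjectMergeRule.
  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class AggregateProjectMergeSketch {

    // A projection that selects input columns by position (e.g. _col0 <- key).
    static class Project {
      final List<Integer> sourceColumns;   // output i reads input column sourceColumns.get(i)
      Project(Integer... cols) { this.sourceColumns = Arrays.asList(cols); }
    }

    // An aggregate whose group-by keys reference columns of its input by position.
    static class Aggregate {
      final List<Integer> groupKeys;
      Aggregate(Integer... keys) { this.groupKeys = new ArrayList<>(Arrays.asList(keys)); }
    }

    // Rewrite the aggregate's keys through the projection's mapping so the
    // aggregate reads the base columns directly and the Project can be dropped.
    static Aggregate merge(Aggregate agg, Project project) {
      Aggregate merged = new Aggregate();
      for (int key : agg.groupKeys) {
        merged.groupKeys.add(project.sourceColumns.get(key));
      }
      return merged;
    }

    public static void main(String[] args) {
      // Base columns: 0 = key, 1 = value. The projection emits (_col0=key, _col1=value);
      // the aggregate groups on _col0, i.e. projected column 0.
      Project select = new Project(0, 1);
      Aggregate groupBy = new Aggregate(0);
      Aggregate merged = merge(groupBy, select);
      System.out.println("group-by now reads base column(s): " + merged.groupKeys); // [0] -> key
    }
  }

Run standalone, the sketch prints the rewritten group-by key; conceptually this is the same rewrite that turns "keys: _col0 (type: string)" into "keys: key (type: string)" in the plan diffs in this commit.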
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/having.q.out b/ql/src/test/results/clientpositive/tez/having.q.out
index 80f02de..31de85a 100644
--- a/ql/src/test/results/clientpositive/tez/having.q.out
+++ b/ql/src/test/results/clientpositive/tez/having.q.out
@@ -110,22 +110,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: max(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: max(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                        value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -486,11 +482,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: max(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -759,22 +755,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (UDFToDouble(key) > 300.0) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: max(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: max(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                        value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -959,11 +951,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: max(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1227,11 +1219,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
index 01ccae7..14d57e3 100644
--- a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
@@ -384,10 +384,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cdouble (type: double)
-                    outputColumnNames: _col0
+                    outputColumnNames: cdouble
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: double)
+                      keys: cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -473,10 +473,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ctinyint, cdouble
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -567,10 +567,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ctinyint, cdouble
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -663,11 +663,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: ctinyint, cstring1, cstring2
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col2)
-                      keys: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+                      aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2)
+                      keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -783,11 +783,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string), key (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: value, key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: sum(key)
+                      keys: value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1042,16 +1042,16 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: value (type: string), key (type: string)
-                    outputColumnNames: _col0, _col1
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: value (type: string)
                       sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                      Map-reduce partition columns: value (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.3
-                      value expressions: _col1 (type: string)
+                      value expressions: key (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
index 23df010..0bf689b 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
@@ -617,22 +617,18 @@ STAGE PLANS:
                             1 Map 4
                           Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                           HybridGraceHashJoin: true
-                          Select Operator
-                            expressions: _col2 (type: string)
-                            outputColumnNames: _col0
+                          Group By Operator
+                            aggregations: count()
+                            keys: _col2 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
                             Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                            Group By Operator
-                              aggregations: count()
-                              keys: _col0 (type: string)
-                              mode: hash
-                              outputColumnNames: _col0, _col1
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
                               Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                              Reduce Output Operator
-                                key expressions: _col0 (type: string)
-                                sort order: +
-                                Map-reduce partition columns: _col0 (type: string)
-                                Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                value expressions: _col1 (type: bigint)
+                              value expressions: _col1 (type: bigint)
         Map 3 
             Map Operator Tree:
                 TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
index aaea52e..f43440e 100644
--- a/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
@@ -448,10 +448,10 @@ STAGE PLANS:
                   Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ts (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ts
                     Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(ts)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
index 927b686..b55cb7e 100644
--- a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
@@ -48,10 +48,10 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -155,10 +155,10 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -301,11 +301,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col0)
-                      keys: _col0 (type: string)
+                      aggregations: count(DISTINCT ds)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -448,10 +448,10 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -736,10 +736,10 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -1017,11 +1017,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ds, hr
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT hr)
+                      keys: ds (type: string), hr (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1261,11 +1261,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ds, hr
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(hr)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1500,10 +1500,10 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -1746,11 +1746,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ds, hr
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT hr)
+                      keys: ds (type: string), hr (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/mrr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/mrr.q.out b/ql/src/test/results/clientpositive/tez/mrr.q.out
index efbd02d..faace21 100644
--- a/ql/src/test/results/clientpositive/tez/mrr.q.out
+++ b/ql/src/test/results/clientpositive/tez/mrr.q.out
@@ -27,11 +27,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1304,22 +1304,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: count(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Map 6 
             Map Operator Tree:
                 TableScan
@@ -1328,22 +1324,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: count(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Map 9 
             Map Operator Tree:
                 TableScan
@@ -1352,22 +1344,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: count(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Reducer 10 
             Reduce Operator Tree:
               Group By Operator
@@ -1692,22 +1680,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: count(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Map 3 
             Map Operator Tree:
                 TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
index 0a091da..22ac115 100644
--- a/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/tez/selectDistinctStar.q.out
@@ -24,10 +24,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -718,20 +718,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (key < '3') (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -2474,10 +2470,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -3164,20 +3160,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (key < '3') (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
index eabda89..0412052 100644
--- a/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
+++ b/ql/src/test/results/clientpositive/tez/stats_only_null.q.out
@@ -90,10 +90,10 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                      aggregations: count(), count(a), count(b), count(c), count(d)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE
@@ -146,10 +146,10 @@ STAGE PLANS:
                   Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: double), b (type: int), c (type: string), d (type: smallint)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(), count(_col0), count(_col1), count(_col2), count(_col3)
+                      aggregations: count(), count(a), count(b), count(c), count(d)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1 Data size: 40 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/subquery_in.q.out b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
index 2b1237b..add2c52 100644
--- a/ql/src/test/results/clientpositive/tez/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/subquery_in.q.out
@@ -646,20 +646,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: ((key > '9') and value is not null) (type: boolean)
                     Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
@@ -829,20 +825,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/tez_dml.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_dml.q.out b/ql/src/test/results/clientpositive/tez/tez_dml.q.out
index ae85292..728eb6b 100644
--- a/ql/src/test/results/clientpositive/tez/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_dml.q.out
@@ -30,11 +30,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/union5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union5.q.out b/ql/src/test/results/clientpositive/tez/union5.q.out
index 327195c..9939b7d 100644
--- a/ql/src/test/results/clientpositive/tez/union5.q.out
+++ b/ql/src/test/results/clientpositive/tez/union5.q.out
@@ -31,33 +31,37 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Map 5 
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -65,22 +69,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 4 
             Reduce Operator Tree:
@@ -89,10 +93,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -103,22 +107,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst2' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Union 3 
             Vertex: Union 3

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/union7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union7.q.out b/ql/src/test/results/clientpositive/tez/union7.q.out
index c5ece62..7a02993 100644
--- a/ql/src/test/results/clientpositive/tez/union7.q.out
+++ b/ql/src/test/results/clientpositive/tez/union7.q.out
@@ -31,17 +31,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+                    expressions: key (type: string)
+                    outputColumnNames: key
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(1)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Map 5 
             Map Operator Tree:
@@ -57,12 +59,12 @@ STAGE PLANS:
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                        Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -70,22 +72,22 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: 'tst1' (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count(1)
                     keys: _col0 (type: string)
                     mode: hash
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                    Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                      Statistics: Num rows: 26 Data size: 199 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: bigint)
         Reducer 4 
             Reduce Operator Tree:
@@ -94,10 +96,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
index cab3a28..bcae2fc 100644
--- a/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
@@ -14059,11 +14059,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -14202,11 +14202,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
index 382380e..d6a8517 100644
--- a/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_aggregate_9.q.out
@@ -125,10 +125,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: dc (type: decimal(38,18))
-                    outputColumnNames: _col0
+                    outputColumnNames: dc
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0), max(_col0), sum(_col0), avg(_col0)
+                      aggregations: min(dc), max(dc), sum(dc), avg(dc)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 1 Data size: 336 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out b/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
index 8dcd40d..ebc0466 100644
--- a/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_binary_join_groupby.q.out
@@ -221,11 +221,11 @@ STAGE PLANS:
                   Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: bin (type: binary)
-                    outputColumnNames: _col0
+                    outputColumnNames: bin
                     Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: binary)
+                      keys: bin (type: binary)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 100 Data size: 29638 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
index 95863a3..f46bcd9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
@@ -1258,10 +1258,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ws_order_number (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: ws_order_number
                     Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: ws_order_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
index 2e9c232..13e047b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
@@ -54,11 +54,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: cint, cdecimal1, cdecimal2
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), count()
-                      keys: _col0 (type: int)
+                      aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), count()
+                      keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                       Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
@@ -159,11 +159,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: cint, cdecimal1, cdecimal2
                     Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1), max(_col1), min(_col1), sum(_col1), avg(_col1), stddev_pop(_col1), stddev_samp(_col1), count(_col2), max(_col2), min(_col2), sum(_col2), avg(_col2), stddev_pop(_col2), stddev_samp(_col2), count()
-                      keys: _col0 (type: int)
+                      aggregations: count(cdecimal1), max(cdecimal1), min(cdecimal1), sum(cdecimal1), avg(cdecimal1), stddev_pop(cdecimal1), stddev_samp(cdecimal1), count(cdecimal2), max(cdecimal2), min(cdecimal2), sum(cdecimal2), avg(cdecimal2), stddev_pop(cdecimal2), stddev_samp(cdecimal2), count()
+                      keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
                       Statistics: Num rows: 12288 Data size: 2165060 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
index 4e1b654..8b6614e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out
@@ -567,10 +567,10 @@ STAGE PLANS:
                   Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: dec (type: decimal(20,10))
-                    outputColumnNames: _col0
+                    outputColumnNames: dec
                     Statistics: Num rows: 75 Data size: 3472 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: avg(_col0), sum(_col0)
+                      aggregations: avg(dec), sum(dec)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
index 13f0de2..dc929d3 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
@@ -1639,11 +1639,11 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: int), key (type: decimal(20,10))
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: value, key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col1), count(_col1), avg(_col1)
-                      keys: _col0 (type: int)
+                      aggregations: sum(key), count(key), avg(key)
+                      keys: value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2317,11 +2317,11 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: int), key (type: decimal(20,10))
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: value, key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: stddev(_col1), variance(_col1)
-                      keys: _col0 (type: int)
+                      aggregations: stddev(key), variance(key)
+                      keys: value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2403,11 +2403,11 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: int), key (type: decimal(20,10))
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: value, key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: stddev_samp(_col1), var_samp(_col1)
-                      keys: _col0 (type: int)
+                      aggregations: stddev_samp(key), var_samp(key)
+                      keys: value (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
@@ -2554,10 +2554,10 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: decimal(20,10))
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(key)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2621,10 +2621,10 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: decimal(20,10))
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(key)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2688,10 +2688,10 @@ STAGE PLANS:
                   Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: decimal(20,10))
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(key)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
index 1671ddf..6c31294 100644
--- a/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out
@@ -128,34 +128,38 @@ STAGE PLANS:
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: s (type: string), t (type: tinyint)
-                    outputColumnNames: _col0, _col1
+                    expressions: t (type: tinyint), s (type: string)
+                    outputColumnNames: t, s
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: tinyint)
+                      keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: tinyint)
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
-                keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: string), _col0 (type: tinyint)
+                  outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
index 9b9dcdd..6aa39f1 100644
--- a/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out
@@ -128,19 +128,19 @@ STAGE PLANS:
                   alias: vectortab2korc
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: s (type: string), t (type: tinyint), b (type: bigint)
-                    outputColumnNames: _col0, _col1, _col2
+                    expressions: t (type: tinyint), s (type: string), b (type: bigint)
+                    outputColumnNames: t, s, b
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col2)
-                      keys: _col0 (type: string), _col1 (type: tinyint)
+                      aggregations: max(b)
+                      keys: t (type: tinyint), s (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
-                        key expressions: _col0 (type: string), _col1 (type: tinyint)
+                        key expressions: _col0 (type: tinyint), _col1 (type: string)
                         sort order: ++
-                        Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                        Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: string)
                         Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col2 (type: bigint)
             Execution mode: vectorized
@@ -148,17 +148,21 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
-                keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
+                Select Operator
+                  expressions: _col1 (type: string), _col0 (type: tinyint), _col2 (type: bigint)
+                  outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             Execution mode: vectorized
 
   Stage: Stage-0

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
index ba2fb77..814ee39 100644
--- a/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_groupby_reduce.q.out
@@ -245,10 +245,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ss_ticket_number (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: ss_ticket_number
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: ss_ticket_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
@@ -369,10 +369,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ss_ticket_number (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: ss_ticket_number
                     Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: ss_ticket_number (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 88276 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
index 688d2ac..6d39d02 100644
--- a/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
@@ -126,10 +126,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: s_store_id (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: s_store_id
                     Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), '0' (type: string)
+                      keys: s_store_id (type: string), '0' (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE
@@ -207,10 +207,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: s_store_id (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: s_store_id
                     Statistics: Num rows: 12 Data size: 25632 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), '0' (type: string)
+                      keys: s_store_id (type: string), '0' (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 24 Data size: 51264 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
index e7fec82..d33b0ed 100644
--- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out
@@ -109,20 +109,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Reduce Operator Tree:
               Group By Operator
@@ -252,20 +248,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: l_partkey is not null (type: boolean)
                     Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: l_partkey (type: int)
+                    Group By Operator
+                      keys: l_partkey (type: int)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: int)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 50 Data size: 5999 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
             Reduce Operator Tree:
               Group By Operator


[39/41] hive git commit: HIVE-11841 : KeyValuesInputMerger creates huge logs (Rajesh Balamohan, reviewed by Gopal V)

Posted by se...@apache.org.
HIVE-11841 : KeyValuesInputMerger creates huge logs (Rajesh Balamohan, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e9c8d7c9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e9c8d7c9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e9c8d7c9

Branch: refs/heads/llap
Commit: e9c8d7c94f10d19ce5dc3b564f6ad1c53d4480b5
Parents: 68c0e99
Author: Sergey Shelukhin <se...@apache.org>
Authored: Fri Sep 18 11:24:44 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Fri Sep 18 11:24:44 2015 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java  | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------
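For context, the removed statement below logged at INFO level on every next() call of the merged iterator, which is what blew up task logs on large inputs. Where per-record diagnostics are still wanted, the usual pattern is to demote the message to DEBUG and gate it behind a level check so the string concatenation is skipped on the hot path. The sketch below is only illustrative and is not part of this commit; it assumes an SLF4J logger, and the class and method names are made up for the example:

  import java.util.Iterator;

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class GuardedLoggingSketch {
    private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

    // Same shape as the merger's next(): delegate to the current iterator, but
    // only build and emit the log message when debug logging is enabled.
    public static Object nextFrom(Iterator<?> currentIterator) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("next called on " + currentIterator);
      }
      return currentIterator.next();
    }
  }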


http://git-wip-us.apache.org/repos/asf/hive/blob/e9c8d7c9/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java
index 9bc6418..2db2f98 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java
@@ -104,7 +104,6 @@ public class KeyValuesInputMerger extends KeyValuesReader {
 
     @Override
     public Object next() {
-      l4j.info("next called on " + currentIterator);
       return currentIterator.next();
     }
 


[03/41] hive git commit: HIVE-11802: Floating-point numbers are displayed with different precision in Beeline/JDBC (Sergio Pena, reviewed by Carl Steinbach)

Posted by se...@apache.org.
HIVE-11802: Floating-point numbers are displayed with different precision in Beeline/JDBC (Sergio Pena, reviewed by Carl Steinbach)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cabd4810
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cabd4810
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cabd4810

Branch: refs/heads/llap
Commit: cabd4810568e00f91b8f53f8d966d9d799897dbf
Parents: 9804918
Author: Sergio Pena <se...@cloudera.com>
Authored: Tue Sep 15 10:40:45 2015 -0500
Committer: Sergio Pena <se...@cloudera.com>
Committed: Tue Sep 15 10:40:45 2015 -0500

----------------------------------------------------------------------
 .../org/apache/hive/service/cli/Column.java     |   3 +-
 .../org/apache/hive/service/cli/TestColumn.java | 129 +++++++++++++++++++
 2 files changed, 131 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
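The underlying issue is that widening a Java float straight to double preserves the binary value rather than the decimal rendering (for instance 1.1f widens to 1.100000023841858), which is why float columns showed extra digits through the JDBC path once buffered in the double-typed column storage. The fix routes the float through sun.misc.FloatingDecimal so the float's shortest decimal form is what lands in the double. A minimal, JDK-portable sketch of the same idea follows; it round-trips through Float.toString and is not the committed code, which uses FloatingDecimal directly:

  public class FloatWideningSketch {
    public static void main(String[] args) {
      float f = 2.033f;

      // Plain widening keeps the float's binary value, so spurious digits show up.
      double widened = (double) f;
      System.out.println(widened);                               // 2.0329999923706055

      // Round-tripping through the float's shortest decimal string keeps the
      // displayed value intact, which is the effect the fix is after.
      double viaString = Double.parseDouble(Float.toString(f));
      System.out.println(viaString);                             // 2.033
    }
  }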


http://git-wip-us.apache.org/repos/asf/hive/blob/cabd4810/service/src/java/org/apache/hive/service/cli/Column.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/Column.java b/service/src/java/org/apache/hive/service/cli/Column.java
index 2e21f18..31091a3 100644
--- a/service/src/java/org/apache/hive/service/cli/Column.java
+++ b/service/src/java/org/apache/hive/service/cli/Column.java
@@ -40,6 +40,7 @@ import org.apache.hive.service.cli.thrift.TI16Column;
 import org.apache.hive.service.cli.thrift.TI32Column;
 import org.apache.hive.service.cli.thrift.TI64Column;
 import org.apache.hive.service.cli.thrift.TStringColumn;
+import sun.misc.FloatingDecimal;
 
 /**
  * Column.
@@ -349,7 +350,7 @@ public class Column extends AbstractList {
         break;
       case FLOAT_TYPE:
         nulls.set(size, field == null);
-        doubleVars()[size] = field == null ? 0 : ((Float)field).doubleValue();
+        doubleVars()[size] = field == null ? 0 : new FloatingDecimal((Float)field).doubleValue();
         break;
       case DOUBLE_TYPE:
         nulls.set(size, field == null);

http://git-wip-us.apache.org/repos/asf/hive/blob/cabd4810/service/src/test/org/apache/hive/service/cli/TestColumn.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/TestColumn.java b/service/src/test/org/apache/hive/service/cli/TestColumn.java
new file mode 100644
index 0000000..87bf848
--- /dev/null
+++ b/service/src/test/org/apache/hive/service/cli/TestColumn.java
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli;
+
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+public class TestColumn {
+  @Test
+  public void testAllIntegerTypeValues() {
+    Map<Type, List<Object>> integerTypesAndValues = new LinkedHashMap<Type, List<Object>>();
+
+    // Add TINYINT values
+    integerTypesAndValues.put(Type.TINYINT_TYPE, Arrays.<Object>asList(
+        Byte.MIN_VALUE, Byte.MAX_VALUE
+    ));
+
+    // Add SMALLINT values
+    integerTypesAndValues.put(Type.SMALLINT_TYPE, Arrays.<Object>asList(
+        Short.MIN_VALUE, Short.MIN_VALUE
+    ));
+
+    // Add INT values
+    integerTypesAndValues.put(Type.INT_TYPE, Arrays.<Object>asList(
+        Integer.MIN_VALUE, Integer.MAX_VALUE
+    ));
+
+    // Add BIGINT values
+    integerTypesAndValues.put(Type.BIGINT_TYPE, Arrays.<Object>asList(
+        Long.MIN_VALUE, Long.MAX_VALUE
+    ));
+
+    // Validate all integer type values are stored correctly
+    for (Map.Entry entry : integerTypesAndValues.entrySet()) {
+      Type type = (Type)entry.getKey();
+      List<Object> values = (List)entry.getValue();
+
+      Column c = new Column(type);
+      for (Object v : values) {
+        c.addValue(type, v);
+      }
+
+      assertEquals(type, c.getType());
+      assertEquals(values.size(), c.size());
+
+      for (int i=0; i<c.size(); i++) {
+        assertEquals(values.get(i), c.get(i));
+      }
+    }
+  }
+
+  @Test
+  public void testFloatAndDoubleValues() {
+    Column floatColumn = new Column(Type.FLOAT_TYPE);
+    floatColumn.addValue(Type.FLOAT_TYPE, 1.1f);
+    floatColumn.addValue(Type.FLOAT_TYPE, 2.033f);
+
+    // FLOAT_TYPE is treated as DOUBLE_TYPE
+    assertEquals(Type.DOUBLE_TYPE, floatColumn.getType());
+    assertEquals(2, floatColumn.size());
+    assertEquals(1.1, floatColumn.get(0));
+    assertEquals(2.033, floatColumn.get(1));
+
+    Column doubleColumn = new Column(Type.DOUBLE_TYPE);
+    doubleColumn.addValue(Type.DOUBLE_TYPE, 1.1);
+    doubleColumn.addValue(Type.DOUBLE_TYPE, 2.033);
+
+    assertEquals(Type.DOUBLE_TYPE, doubleColumn.getType());
+    assertEquals(2, doubleColumn.size());
+    assertEquals(1.1, doubleColumn.get(0));
+    assertEquals(2.033, doubleColumn.get(1));
+  }
+
+  @Test
+  public void testBooleanValues() {
+    Column boolColumn = new Column(Type.BOOLEAN_TYPE);
+    boolColumn.addValue(Type.BOOLEAN_TYPE, true);
+    boolColumn.addValue(Type.BOOLEAN_TYPE, false);
+
+    assertEquals(Type.BOOLEAN_TYPE, boolColumn.getType());
+    assertEquals(2, boolColumn.size());
+    assertEquals(true, boolColumn.get(0));
+    assertEquals(false, boolColumn.get(1));
+  }
+
+  @Test
+  public void testStringValues() {
+    Column stringColumn = new Column(Type.STRING_TYPE);
+    stringColumn.addValue(Type.STRING_TYPE, "12abc456");
+    stringColumn.addValue(Type.STRING_TYPE, "~special$&string");
+
+    assertEquals(Type.STRING_TYPE, stringColumn.getType());
+    assertEquals(2, stringColumn.size());
+    assertEquals("12abc456", stringColumn.get(0));
+    assertEquals("~special$&string", stringColumn.get(1));
+  }
+
+  @Test
+  public void testBinaryValues() {
+    Column binaryColumn = new Column(Type.BINARY_TYPE);
+    binaryColumn.addValue(Type.BINARY_TYPE, new byte[]{-1, 0, 3, 4});
+
+    assertEquals(Type.BINARY_TYPE, binaryColumn.getType());
+    assertEquals(1, binaryColumn.size());
+    assertArrayEquals(new byte[]{-1, 0, 3, 4}, (byte[]) binaryColumn.get(0));
+  }
+}


[29/41] hive git commit: HIVE-11583 : When PTF is used over large partitions, the result could be corrupted (Illya Yalovyy via Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11583 : When PTF is used over large partitions, the result could be corrupted (Illya Yalovyy via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8d524e06
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8d524e06
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8d524e06

Branch: refs/heads/llap
Commit: 8d524e062e6a8ad8c592e6067cea254c054797cd
Parents: 7be02ae
Author: Illya Yalovyy <ya...@amazon.com>
Authored: Mon Sep 14 10:18:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Sep 16 18:15:08 2015 -0700

----------------------------------------------------------------------
 .../ql/exec/persistence/PTFRowContainer.java    | 14 +++++----
 .../hive/ql/exec/persistence/RowContainer.java  | 12 +++++---
 .../exec/persistence/TestPTFRowContainer.java   | 31 ++++++++++++++------
 3 files changed, 38 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
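The relevant change in PTFRowContainer.readBlock() is the handling of the last block of a spilled partition: previously endSplit stayed equal to startSplit for that block, so a block whose rows spill across more than one underlying file split was not read back in full, and the new readIntoOffset < getBlockSize() guard keeps the loop from running past the block. A rough standalone sketch of that boundary logic follows; the list-of-lists stands in for splits and record readers, and all names here are illustrative rather than Hive's:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  public class BlockReadSketch {

    // Read one logical block of rows that starts in startSplit and may continue
    // into later splits. The two stop conditions mirror the fix: stop at the
    // last real split, and stop once the block holds blockSize rows.
    static List<String> readBlock(List<List<String>> splits, int startSplit, int blockSize) {
      List<String> block = new ArrayList<String>();
      int lastActualSplit = splits.size() - 1;   // the old code left this at startSplit for the last block
      for (int i = startSplit; i <= lastActualSplit && block.size() < blockSize; i++) {
        for (String row : splits.get(i)) {
          if (block.size() >= blockSize) {
            break;
          }
          block.add(row);
        }
      }
      return block;
    }

    public static void main(String[] args) {
      // Five rows belong to the block, but their data spans two splits.
      List<List<String>> splits = Arrays.asList(
          Arrays.asList("r0", "r1", "r2"),
          Arrays.asList("r3", "r4", "r5"));
      System.out.println(readBlock(splits, 0, 5));   // [r0, r1, r2, r3, r4]
    }
  }

The new testBlocksLargerThanSplit case in TestPTFRowContainer exercises this path by padding each row with a string whose length equals the local filesystem's default block size, so a block of rows spans more than one file split.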


http://git-wip-us.apache.org/repos/asf/hive/blob/8d524e06/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
index d2bfea6..61cc6e8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/PTFRowContainer.java
@@ -81,8 +81,8 @@ import org.apache.hadoop.util.Progressable;
  */
 public class PTFRowContainer<Row extends List<Object>> extends RowContainer<Row> {
 
-  ArrayList<BlockInfo> blockInfos;
-  int currentReadBlockStartRow;
+  private ArrayList<BlockInfo> blockInfos;
+  private int currentReadBlockStartRow;
 
   public PTFRowContainer(int bs, Configuration jc, Reporter reporter
       ) throws HiveException {
@@ -190,14 +190,16 @@ public class PTFRowContainer<Row extends List<Object>> extends RowContainer<Row>
 
     BlockInfo bI = blockInfos.get(blockNum);
     int startSplit = bI.startingSplit;
-    int endSplit = startSplit;
-    if ( blockNum != blockInfos.size() - 1) {
-      endSplit = blockInfos.get(blockNum+1).startingSplit;
+    int endSplit;
+    if ( blockNum != blockInfos.size() - 1 ) {
+      endSplit = blockInfos.get(blockNum + 1).startingSplit;
+    } else {
+      endSplit = getLastActualSplit();
     }
 
     try {
       int readIntoOffset = 0;
-      for(int i = startSplit; i <= endSplit; i++ ) {
+      for(int i = startSplit; i <= endSplit && readIntoOffset < getBlockSize(); i++ ) {
         org.apache.hadoop.mapred.RecordReader rr = setReaderAtSplit(i);
         if ( i == startSplit ) {
           ((PTFSequenceFileRecordReader)rr).seek(bI.startOffset);

http://git-wip-us.apache.org/repos/asf/hive/blob/8d524e06/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
index 4252bd1..68dc482 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java
@@ -103,7 +103,7 @@ public class RowContainer<ROW extends List<Object>>
 
   boolean firstCalled = false; // once called first, it will never be able to
   // write again.
-  int acutalSplitNum = 0;
+  private int actualSplitNum = 0;
   int currentSplitPointer = 0;
   org.apache.hadoop.mapred.RecordReader rr = null; // record reader
   RecordWriter rw = null;
@@ -220,7 +220,7 @@ public class RowContainer<ROW extends List<Object>>
           HiveConf.setVar(localJc, HiveConf.ConfVars.HADOOPMAPREDINPUTDIR,
               org.apache.hadoop.util.StringUtils.escapeString(parentFile.getAbsolutePath()));
           inputSplits = inputFormat.getSplits(localJc, 1);
-          acutalSplitNum = inputSplits.length;
+          actualSplitNum = inputSplits.length;
         }
         currentSplitPointer = 0;
         rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer],
@@ -375,7 +375,7 @@ public class RowContainer<ROW extends List<Object>>
         }
       }
 
-      if (nextSplit && this.currentSplitPointer < this.acutalSplitNum) {
+      if (nextSplit && this.currentSplitPointer < this.actualSplitNum) {
         JobConf localJc = getLocalFSJobConfClone(jc);
         // open record reader to read next split
         rr = inputFormat.getRecordReader(inputSplits[currentSplitPointer], jobCloneUsingLocalFs,
@@ -421,7 +421,7 @@ public class RowContainer<ROW extends List<Object>>
     addCursor = 0;
     numFlushedBlocks = 0;
     this.readBlockSize = 0;
-    this.acutalSplitNum = 0;
+    this.actualSplitNum = 0;
     this.currentSplitPointer = -1;
     this.firstCalled = false;
     this.inputSplits = null;
@@ -606,4 +606,8 @@ public class RowContainer<ROW extends List<Object>>
     clearRows();
     currentReadBlock = firstReadBlockPointer = currentWriteBlock = null;
   }
+
+  protected int getLastActualSplit() {
+    return actualSplitNum - 1;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/8d524e06/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
index a404ff0..0611072 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/persistence/TestPTFRowContainer.java
@@ -18,12 +18,14 @@
 
 package org.apache.hadoop.hive.ql.exec.persistence;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.serde2.SerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -37,11 +39,13 @@ import org.apache.hadoop.io.Text;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+
 @SuppressWarnings("deprecation")
 public class TestPTFRowContainer {
 
-  private static final String COL_NAMES = "x,y,z,a,b";
-  private static final String COL_TYPES = "int,string,double,int,string";
+  private static final String COL_NAMES = "x,y,z,a,b,v";
+  private static final String COL_TYPES = "int,string,double,int,string,string";
 
   static SerDe serDe;
   static Configuration cfg;
@@ -70,7 +74,7 @@ public class TestPTFRowContainer {
     return rc;
   }
 
-  private void runTest(int sz, int blockSize) throws SerDeException, HiveException {
+  private void runTest(int sz, int blockSize, String value) throws SerDeException, HiveException {
     List<Object> row;
 
     PTFRowContainer<List<Object>> rc = rowContainer(blockSize);
@@ -82,16 +86,17 @@ public class TestPTFRowContainer {
       row.add(new DoubleWritable(i));
       row.add(new IntWritable(i));
       row.add(new Text("def " + i));
+      row.add(new Text(value));
       rc.addRow(row);
     }
 
     // test forward scan
-    assert(rc.rowCount() == sz);
+    assertEquals(sz, rc.rowCount());
     i = 0;
     row = new ArrayList<Object>();
     row = rc.first();
     while(row != null ) {
-      assert(row.get(1).toString().equals("abc " + i));
+      assertEquals("abc " + i, row.get(1).toString());
       i++;
       row = rc.next();
     }
@@ -100,7 +105,7 @@ public class TestPTFRowContainer {
     row = rc.first();
     for(i = sz - 1; i >= 0; i-- ) {
       row = rc.getAt(i);
-      assert(row.get(1).toString().equals("abc " + i));
+      assertEquals("abc " + i, row.get(1).toString());
     }
 
     Random r = new Random(1000L);
@@ -109,20 +114,23 @@ public class TestPTFRowContainer {
     for(i=0; i < 100; i++) {
       int j = r.nextInt(sz);
       row = rc.getAt(j);
-      assert(row.get(1).toString().equals("abc " + j));
+      assertEquals("abc " + j, row.get(1).toString());
     }
 
     // intersperse getAt and next calls
     for(i=0; i < 100; i++) {
       int j = r.nextInt(sz);
       row = rc.getAt(j);
-      assert(row.get(1).toString().equals("abc " + j));
+      assertEquals("abc " + j, row.get(1).toString());
       for(int k = j + 1; k < j + (blockSize/4) && k < sz; k++) {
         row = rc.next();
-        assert(row.get(4).toString().equals("def " + k));
+        assertEquals("def " + k, row.get(4).toString());
       }
     }
+  }
 
+  private void runTest(int sz, int blockSize) throws SerDeException, HiveException {
+    runTest(sz, blockSize, "");
   }
 
   @Test
@@ -134,4 +142,9 @@ public class TestPTFRowContainer {
   public void testSmallBlockSize() throws SerDeException, HiveException {
     runTest(10 * 1000, 5);
   }
+
+  @Test
+  public void testBlocksLargerThanSplit() throws SerDeException, HiveException, IOException {
+    runTest(5, 2, new String(new char[(int)FileSystem.getLocal(cfg).getDefaultBlockSize()]));
+  }
 }
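
The new testBlocksLargerThanSplit case pads the extra column with a value whose length equals the local filesystem's default block size, presumably so that a single serialized block grows past a file split. A minimal standalone sketch of how that padding value is built (assumes hadoop-common on the classpath; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class PaddingValueSketch {
  public static void main(String[] args) throws Exception {
    Configuration cfg = new Configuration();
    // Same call the test uses to size the padding value.
    long blockSize = FileSystem.getLocal(cfg).getDefaultBlockSize();
    // A '\0'-filled string of exactly blockSize characters.
    String value = new String(new char[(int) blockSize]);
    System.out.println("padding length = " + value.length());
  }
}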


[31/41] hive git commit: HIVE-11789: Better support for functions recognition in CBO (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11789: Better support for functions recognition in CBO (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7201c264
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7201c264
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7201c264

Branch: refs/heads/llap
Commit: 7201c264a1fe8347fd87fc8c1bb835083e9aac75
Parents: 79244ab
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Sep 17 17:48:01 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Thu Sep 17 17:48:01 2015 +0100

----------------------------------------------------------------------
 .../calcite/reloperators/HiveBetween.java       | 75 ++++++++++++++++++++
 .../optimizer/calcite/reloperators/HiveIn.java  | 41 +++++++++++
 .../calcite/rules/HivePreFilteringRule.java     | 37 +++-------
 .../translator/SqlFunctionConverter.java        | 16 ++++-
 4 files changed, 142 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7201c264/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveBetween.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveBetween.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveBetween.java
new file mode 100644
index 0000000..2388939
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveBetween.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.rel.type.RelDataType;
+import org.apache.calcite.rel.type.RelDataTypeFactory;
+import org.apache.calcite.sql.SqlCallBinding;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlNode;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.type.ReturnTypes;
+import org.apache.calcite.sql.type.SqlOperandTypeInference;
+import org.apache.calcite.sql.type.SqlTypeName;
+
+public class HiveBetween extends SqlSpecialOperator {
+
+  public static final SqlSpecialOperator INSTANCE =
+          new HiveBetween();
+
+  private HiveBetween() {
+    super(
+        "BETWEEN",
+        SqlKind.BETWEEN,
+        30,
+        true,
+        ReturnTypes.BOOLEAN_NULLABLE,
+        FIRST_BOOLEAN_THEN_FIRST_KNOWN,
+        null);
+  }
+
+  /**
+   * Operand type-inference strategy where an unknown operand type is derived
+   * from the first operand with a known type, but the first operand is a boolean.
+   */
+  public static final SqlOperandTypeInference FIRST_BOOLEAN_THEN_FIRST_KNOWN =
+      new SqlOperandTypeInference() {
+        public void inferOperandTypes(
+            SqlCallBinding callBinding,
+            RelDataType returnType,
+            RelDataType[] operandTypes) {
+          final RelDataType unknownType =
+              callBinding.getValidator().getUnknownType();
+          RelDataType knownType = unknownType;
+          for (int i = 1; i < callBinding.getCall().getOperandList().size(); i++) {
+            SqlNode operand = callBinding.getCall().getOperandList().get(i);
+            knownType = callBinding.getValidator().deriveType(
+                callBinding.getScope(), operand);
+            if (!knownType.equals(unknownType)) {
+              break;
+            }
+          }
+
+          RelDataTypeFactory typeFactory = callBinding.getTypeFactory();
+          operandTypes[0] = typeFactory.createSqlType(SqlTypeName.BOOLEAN);
+          for (int i = 1; i < operandTypes.length; ++i) {
+            operandTypes[i] = knownType;
+          }
+        }
+      };
+}
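
The FIRST_BOOLEAN_THEN_FIRST_KNOWN strategy in HiveBetween forces operand 0 to BOOLEAN (Hive's BETWEEN appears to carry its NOT/invert flag there, which is also why HivePreFilteringRule below reads the column from operand 1) and fills every other slot with the first known operand type. A standalone sketch of that behaviour, using a hypothetical TypeName enum in place of Calcite's RelDataType machinery:

import java.util.Arrays;
import java.util.List;

public class BetweenTypeInferenceSketch {
  enum TypeName { UNKNOWN, BOOLEAN, INT }

  // Mirrors FIRST_BOOLEAN_THEN_FIRST_KNOWN: slot 0 is always BOOLEAN, every
  // other slot takes the type of the first operand (from index 1 on) whose
  // type is already known.
  static TypeName[] inferOperandTypes(List<TypeName> derived, int operandCount) {
    TypeName known = TypeName.UNKNOWN;
    for (int i = 1; i < derived.size(); i++) {
      if (derived.get(i) != TypeName.UNKNOWN) {
        known = derived.get(i);
        break;
      }
    }
    TypeName[] operandTypes = new TypeName[operandCount];
    operandTypes[0] = TypeName.BOOLEAN;
    for (int i = 1; i < operandTypes.length; i++) {
      operandTypes[i] = known;
    }
    return operandTypes;
  }

  public static void main(String[] args) {
    // BETWEEN(invertFlag, expr, lower, upper) where only "lower" is known to be INT
    System.out.println(Arrays.toString(inferOperandTypes(
        Arrays.asList(TypeName.UNKNOWN, TypeName.UNKNOWN, TypeName.INT, TypeName.UNKNOWN), 4)));
    // prints [BOOLEAN, INT, INT, INT]
  }
}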

http://git-wip-us.apache.org/repos/asf/hive/blob/7201c264/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIn.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIn.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIn.java
new file mode 100644
index 0000000..6d87003
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveIn.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators;
+
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.type.InferTypes;
+import org.apache.calcite.sql.type.ReturnTypes;
+
+public class HiveIn extends SqlSpecialOperator {
+
+  public static final SqlSpecialOperator INSTANCE =
+          new HiveIn();
+
+  private HiveIn() {
+    super(
+        "IN",
+        SqlKind.IN,
+        30,
+        true,
+        ReturnTypes.BOOLEAN_NULLABLE,
+        InferTypes.FIRST_KNOWN,
+        null);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/7201c264/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
index dde6288..3e2311c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HivePreFilteringRule.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.rules;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map.Entry;
 import java.util.Set;
@@ -41,22 +42,11 @@ import org.apache.calcite.rex.RexUtil;
 import org.apache.calcite.sql.SqlKind;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBetween;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualNS;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan;
-import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.LinkedHashMultimap;
 import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
 
 
 public class HivePreFilteringRule extends RelOptRule {
@@ -71,18 +61,13 @@ public class HivePreFilteringRule extends RelOptRule {
   private final FilterFactory filterFactory;
 
 
-  private static final Set<String> COMPARISON_UDFS = Sets.newHashSet(
-          GenericUDFOPEqual.class.getAnnotation(Description.class).name(),
-          GenericUDFOPEqualNS.class.getAnnotation(Description.class).name(),
-          GenericUDFOPEqualOrGreaterThan.class.getAnnotation(Description.class).name(),
-          GenericUDFOPEqualOrLessThan.class.getAnnotation(Description.class).name(),
-          GenericUDFOPGreaterThan.class.getAnnotation(Description.class).name(),
-          GenericUDFOPLessThan.class.getAnnotation(Description.class).name(),
-          GenericUDFOPNotEqual.class.getAnnotation(Description.class).name());
-  private static final String IN_UDF =
-          GenericUDFIn.class.getAnnotation(Description.class).name();
-  private static final String BETWEEN_UDF =
-          GenericUDFBetween.class.getAnnotation(Description.class).name();
+  private static final Set<SqlKind> COMPARISON = EnumSet.of(
+          SqlKind.EQUALS,
+          SqlKind.GREATER_THAN_OR_EQUAL,
+          SqlKind.LESS_THAN_OR_EQUAL,
+          SqlKind.GREATER_THAN,
+          SqlKind.LESS_THAN,
+          SqlKind.NOT_EQUALS);
 
 
   private HivePreFilteringRule() {
@@ -176,7 +161,7 @@ public class HivePreFilteringRule extends RelOptRule {
           continue;
         }
         RexCall conjCall = (RexCall) conjunction;
-        if(COMPARISON_UDFS.contains(conjCall.getOperator().getName())) {
+        if(COMPARISON.contains(conjCall.getOperator().getKind())) {
           if (conjCall.operands.get(0) instanceof RexInputRef &&
                   conjCall.operands.get(1) instanceof RexLiteral) {
             reductionCondition.put(conjCall.operands.get(0).toString(),
@@ -188,11 +173,11 @@ public class HivePreFilteringRule extends RelOptRule {
                     conjCall);
             addedToReductionCondition = true;
           }
-        } else if(conjCall.getOperator().getName().equals(IN_UDF)) {
+        } else if(conjCall.getOperator().getKind().equals(SqlKind.IN)) {
           reductionCondition.put(conjCall.operands.get(0).toString(),
                   conjCall);
           addedToReductionCondition = true;
-        } else if(conjCall.getOperator().getName().equals(BETWEEN_UDF)) {
+        } else if(conjCall.getOperator().getKind().equals(SqlKind.BETWEEN)) {
           reductionCondition.put(conjCall.operands.get(1).toString(),
                   conjCall);
           addedToReductionCondition = true;
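
The rule now classifies conjuncts by Calcite SqlKind rather than by comparing Hive UDF display names. A condensed standalone sketch of the dispatch, where Kind stands in for SqlKind and the comparison case is simplified to the column-on-the-left form:

import java.util.EnumSet;
import java.util.Set;

public class KindDispatchSketch {
  enum Kind { EQUALS, GREATER_THAN, GREATER_THAN_OR_EQUAL,
              LESS_THAN, LESS_THAN_OR_EQUAL, NOT_EQUALS, IN, BETWEEN, OTHER }

  // Same classification the rule now performs with SqlKind instead of UDF names.
  private static final Set<Kind> COMPARISON = EnumSet.of(
      Kind.EQUALS, Kind.GREATER_THAN, Kind.GREATER_THAN_OR_EQUAL,
      Kind.LESS_THAN, Kind.LESS_THAN_OR_EQUAL, Kind.NOT_EQUALS);

  // Which operand position holds the column reference the rule groups on:
  // operand 0 for comparisons and IN, operand 1 for BETWEEN (operand 0 is the
  // invert flag), -1 for anything that is not a reduction-condition candidate.
  static int keyOperandIndex(Kind kind) {
    if (COMPARISON.contains(kind) || kind == Kind.IN) {
      return 0;
    } else if (kind == Kind.BETWEEN) {
      return 1;
    }
    return -1;
  }

  public static void main(String[] args) {
    System.out.println(keyOperandIndex(Kind.EQUALS));   // 0
    System.out.println(keyOperandIndex(Kind.BETWEEN));  // 1
    System.out.println(keyOperandIndex(Kind.OTHER));    // -1
  }
}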

http://git-wip-us.apache.org/repos/asf/hive/blob/7201c264/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
index 219289c..fd78824 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/SqlFunctionConverter.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
 import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException.UnsupportedFeature;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveBetween;
+import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveIn;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.ParseDriver;
@@ -193,7 +195,16 @@ public class SqlFunctionConverter {
     HiveToken hToken = calciteToHiveToken.get(op);
     ASTNode node;
     if (hToken != null) {
-      node = (ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text);
+      switch (op.kind) {
+        case IN:
+        case BETWEEN:
+        case ROW:
+          node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
+          node.addChild((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text));
+          break;
+        default:
+          node = (ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text);
+      }
     } else {
       node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION");
       if (op.kind != SqlKind.CAST) {
@@ -296,6 +307,9 @@ public class SqlFunctionConverter {
           hToken(HiveParser.GREATERTHANOREQUALTO, ">="));
       registerFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not"));
       registerFunction("<>", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveParser.NOTEQUAL, "<>"));
+      registerFunction("in", HiveIn.INSTANCE, hToken(HiveParser.Identifier, "in"));
+      registerFunction("between", HiveBetween.INSTANCE, hToken(HiveParser.Identifier, "between"));
+      registerFunction("struct", SqlStdOperatorTable.ROW, hToken(HiveParser.Identifier, "struct"));
     }
 
     private void registerFunction(String name, SqlOperator calciteFn, HiveToken hiveToken) {
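
For IN, BETWEEN and ROW the converter no longer returns the bare Hive token: it creates a TOK_FUNCTION node and adds the token as its first child, matching the shape Hive's parser produces for these constructs. A rough standalone illustration of that shape, with plain strings standing in for the ANTLR ASTNode objects (the sample operand subtrees are hypothetical):

import java.util.Arrays;
import java.util.List;

public class AstShapeSketch {
  // Builds the textual shape of a TOK_FUNCTION node: the function identifier
  // first, then the operand subtrees, mirroring what the converter now emits
  // for IN, BETWEEN and ROW (struct).
  static String tokFunction(String identifier, List<String> operandAsts) {
    StringBuilder sb = new StringBuilder("(TOK_FUNCTION ").append(identifier);
    for (String operand : operandAsts) {
      sb.append(' ').append(operand);
    }
    return sb.append(')').toString();
  }

  public static void main(String[] args) {
    // Hypothetical operand subtrees for "c1 IN (10, 20)"
    System.out.println(tokFunction("in",
        Arrays.asList("(TOK_TABLE_OR_COL c1)", "10", "20")));
    // prints (TOK_FUNCTION in (TOK_TABLE_OR_COL c1) 10 20)
  }
}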


[21/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/auto_join18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join18.q.out b/ql/src/test/results/clientpositive/spark/auto_join18.q.out
index 8607221..349e76d 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join18.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join18.q.out
@@ -45,11 +45,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -66,10 +66,10 @@ STAGE PLANS:
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
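
The q.out changes in this commit follow the pattern visible above: the Select Operator that only renamed columns to _colN is merged away, and the Group By Operator references the underlying column names directly (keys: key, aggregations: count(value)). A standalone sketch of that rewrite on plain strings, not Calcite's actual AggregateProjectMergeRule API:

import java.util.HashMap;
import java.util.Map;

public class AggregateProjectMergeSketch {
  public static void main(String[] args) {
    // Projection being merged away: internal names -> source columns.
    Map<String, String> project = new HashMap<>();
    project.put("_col0", "key");
    project.put("_col1", "value");

    String[] groupKeys = {"_col0"};
    String aggCall = "count(_col1)";

    // Rewrite group keys through the projection.
    for (int i = 0; i < groupKeys.length; i++) {
      groupKeys[i] = project.getOrDefault(groupKeys[i], groupKeys[i]);
    }
    // Rewrite the aggregate call's argument the same way.
    for (Map.Entry<String, String> e : project.entrySet()) {
      aggCall = aggCall.replace(e.getKey(), e.getValue());
    }

    System.out.println("keys: " + groupKeys[0] + ", aggregations: " + aggCall);
    // prints keys: key, aggregations: count(value)
  }
}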

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
index df737c2..92c827e 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join18_multi_distinct.q.out
@@ -47,11 +47,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -68,11 +68,11 @@ STAGE PLANS:
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col0)
-                      keys: _col0 (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT value), count(DISTINCT key)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/auto_join27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join27.q.out b/ql/src/test/results/clientpositive/spark/auto_join27.q.out
index 43313e0..a325533 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join27.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join27.q.out
@@ -60,20 +60,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (UDFToDouble(key) < 200.0) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
         Map 6 
             Map Operator Tree:
                 TableScan

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/auto_join32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/auto_join32.q.out b/ql/src/test/results/clientpositive/spark/auto_join32.q.out
index 316792b..679dd79 100644
--- a/ql/src/test/results/clientpositive/spark/auto_join32.q.out
+++ b/ql/src/test/results/clientpositive/spark/auto_join32.q.out
@@ -422,7 +422,8 @@ STAGE PLANS:
   Stage: Stage-1
     Spark
       Edges:
-        Reducer 2 <- Map 1 (GROUP PARTITION-LEVEL SORT, 2)
+        Reducer 2 <- Map 1 (GROUP, 2)
+        Reducer 3 <- Reducer 2 (GROUP, 2)
 #### A masked pattern was here ####
       Vertices:
         Map 1 
@@ -445,20 +446,16 @@ STAGE PLANS:
                           1 _col0 (type: string)
                         outputColumnNames: _col1, _col3
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                        Select Operator
-                          expressions: _col3 (type: string), _col1 (type: string)
+                        Group By Operator
+                          keys: _col1 (type: string), _col3 (type: string)
+                          mode: hash
                           outputColumnNames: _col0, _col1
                           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                          Group By Operator
-                            keys: _col0 (type: string), _col1 (type: string)
-                            mode: hash
-                            outputColumnNames: _col0, _col1
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string), _col1 (type: string)
+                            sort order: ++
+                            Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                            Reduce Output Operator
-                              key expressions: _col0 (type: string), _col1 (type: string)
-                              sort order: ++
-                              Map-reduce partition columns: _col0 (type: string)
-                              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -467,18 +464,32 @@ STAGE PLANS:
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
-                  mode: complete
+                  aggregations: count(_col0)
+                  keys: _col1 (type: string)
+                  mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  File Output Operator
-                    compressed: false
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.TextInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    value expressions: _col1 (type: bigint)
+        Reducer 3 
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/count.q.out b/ql/src/test/results/clientpositive/spark/count.q.out
index b2e9ffb..cb9eda5 100644
--- a/ql/src/test/results/clientpositive/spark/count.q.out
+++ b/ql/src/test/results/clientpositive/spark/count.q.out
@@ -53,11 +53,11 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col2), sum(_col3)
-                      keys: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                      aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
+                      keys: a (type: int), b (type: int), c (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                       Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
@@ -188,14 +188,14 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-                    outputColumnNames: _col0, _col1, _col2, _col3
+                    outputColumnNames: a, b, c, d
                     Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                      key expressions: a (type: int), b (type: int), c (type: int)
                       sort order: +++
-                      Map-reduce partition columns: _col0 (type: int)
+                      Map-reduce partition columns: a (type: int)
                       Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col3 (type: int)
+                      value expressions: d (type: int)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5_map.q.out b/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
index e775921..add3094 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5_map.q.out
@@ -31,10 +31,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(key)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
index 5c58934..924ef5d 100644
--- a/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby5_map_skew.q.out
@@ -31,10 +31,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(key)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
index 25bfbec..8b5a4c0 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out
@@ -223,11 +223,11 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, val
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT val)
+                      keys: key (type: string), '0' (type: string), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
@@ -408,11 +408,11 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, val
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT val)
+                      keys: key (type: string), '0' (type: string), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index 088428b..99223b9 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -588,20 +588,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean)
                     Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      keys: key (type: string), value (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string), _col1 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string), _col1 (type: string)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: string)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                          Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out b/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out
index 9d36099..cb2c9bd 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out
@@ -20,12 +20,12 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: key (type: string)
                       sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
@@ -71,12 +71,12 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: key (type: string)
                       sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                      Map-reduce partition columns: key (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
@@ -123,10 +123,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -189,10 +189,10 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -254,11 +254,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -312,11 +312,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -371,11 +371,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -444,11 +444,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -620,22 +620,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (key < '12') (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                    Group By Operator
+                      aggregations: count()
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: count()
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: rand() (type: double)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: rand() (type: double)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: bigint)
+                        value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
index 0a06c65..ce5998e 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
@@ -127,11 +127,11 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, val
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT val)
+                      keys: key (type: string), '0' (type: string), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
@@ -306,11 +306,11 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), val (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, val
                     Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1)
-                      keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT val)
+                      keys: key (type: string), '0' (type: string), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/having.q.out b/ql/src/test/results/clientpositive/spark/having.q.out
index da59af9..ad3ef79 100644
--- a/ql/src/test/results/clientpositive/spark/having.q.out
+++ b/ql/src/test/results/clientpositive/spark/having.q.out
@@ -110,22 +110,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: max(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: max(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                        value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -486,11 +482,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: max(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -759,22 +755,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (UDFToDouble(key) > 300.0) (type: boolean)
                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string), value (type: string)
+                    Group By Operator
+                      aggregations: max(value)
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: max(_col1)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col1 (type: string)
+                        value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator
@@ -959,11 +951,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: max(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1227,11 +1219,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
index f39cd57..9b284e7 100644
--- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out
@@ -79,11 +79,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: string)
+                      keys: key (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/join18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join18.q.out b/ql/src/test/results/clientpositive/spark/join18.q.out
index c702fdf..e11ecec 100644
--- a/ql/src/test/results/clientpositive/spark/join18.q.out
+++ b/ql/src/test/results/clientpositive/spark/join18.q.out
@@ -48,11 +48,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -69,10 +69,10 @@ STAGE PLANS:
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string), _col1 (type: string)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
index 18d79d3..5560226 100644
--- a/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out
@@ -50,11 +50,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(value)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -71,11 +71,11 @@ STAGE PLANS:
                   Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: key, value
                     Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col0)
-                      keys: _col0 (type: string), _col1 (type: string)
+                      aggregations: count(DISTINCT value), count(DISTINCT key)
+                      keys: key (type: string), value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
                       Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/join31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join31.q.out b/ql/src/test/results/clientpositive/spark/join31.q.out
index 108a1ea..469a19f 100644
--- a/ql/src/test/results/clientpositive/spark/join31.q.out
+++ b/ql/src/test/results/clientpositive/spark/join31.q.out
@@ -49,20 +49,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
+                    Group By Operator
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
         Reducer 5 
             Local Work:
               Map Reduce Local Work
@@ -92,20 +88,16 @@ STAGE PLANS:
                   Filter Operator
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
+                    Group By Operator
+                      keys: key (type: string)
+                      mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Local Work:
               Map Reduce Local Work

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
index a34a399..6ca0527 100644
--- a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
+++ b/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out
@@ -553,10 +553,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: hr (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: hr
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: hr (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
index 2dc710c..b0e6c6a 100644
--- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out
@@ -384,10 +384,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cdouble (type: double)
-                    outputColumnNames: _col0
+                    outputColumnNames: cdouble
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: double)
+                      keys: cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
@@ -473,10 +473,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ctinyint, cdouble
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
@@ -567,10 +567,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ctinyint, cdouble
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
@@ -663,11 +663,11 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
-                    outputColumnNames: _col0, _col1, _col2
+                    outputColumnNames: ctinyint, cstring1, cstring2
                     Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(DISTINCT _col1), count(DISTINCT _col2)
-                      keys: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+                      aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2)
+                      keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
@@ -783,11 +783,11 @@ STAGE PLANS:
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: value (type: string), key (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: value, key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: sum(key)
+                      keys: value (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1042,16 +1042,16 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: value (type: string), key (type: string)
-                    outputColumnNames: _col0, _col1
+                    expressions: key (type: string), value (type: string)
+                    outputColumnNames: key, value
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
-                      key expressions: _col0 (type: string)
+                      key expressions: value (type: string)
                       sort order: +
-                      Map-reduce partition columns: _col0 (type: string)
+                      Map-reduce partition columns: value (type: string)
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       TopN Hash Memory Usage: 0.3
-                      value expressions: _col1 (type: string)
+                      value expressions: key (type: string)
         Reducer 2 
             Reduce Operator Tree:
               Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
index 3b2c80e..549911c 100644
--- a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out
@@ -664,22 +664,18 @@ STAGE PLANS:
                           input vertices:
                             1 Map 4
                           Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                          Select Operator
-                            expressions: _col2 (type: string)
-                            outputColumnNames: _col0
+                          Group By Operator
+                            aggregations: count()
+                            keys: _col2 (type: string)
+                            mode: hash
+                            outputColumnNames: _col0, _col1
                             Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                            Group By Operator
-                              aggregations: count()
-                              keys: _col0 (type: string)
-                              mode: hash
-                              outputColumnNames: _col0, _col1
+                            Reduce Output Operator
+                              key expressions: _col0 (type: string)
+                              sort order: +
+                              Map-reduce partition columns: _col0 (type: string)
                               Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                              Reduce Output Operator
-                                key expressions: _col0 (type: string)
-                                sort order: +
-                                Map-reduce partition columns: _col0 (type: string)
-                                Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                value expressions: _col1 (type: bigint)
+                              value expressions: _col1 (type: bigint)
             Local Work:
               Map Reduce Local Work
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
index 18e1d4e..b2221fc 100644
--- a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
@@ -448,10 +448,10 @@ STAGE PLANS:
                   Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ts (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: ts
                     Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(ts)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
index ed536c2..96defa6 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_join_filter.q.out
@@ -215,24 +215,20 @@ STAGE PLANS:
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                    Group By Operator
+                      aggregations: min(key)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: min(_col0)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          tag: -1
-                          value expressions: _col1 (type: string)
-                          auto parallelism: false
+                        tag: -1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -601,24 +597,20 @@ STAGE PLANS:
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                    Group By Operator
+                      aggregations: min(key)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: min(_col0)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          tag: -1
-                          value expressions: _col1 (type: string)
-                          auto parallelism: false
+                        tag: -1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -981,11 +973,11 @@ STAGE PLANS:
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: key
                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
-                      keys: _col0 (type: string)
+                      aggregations: min(key)
+                      keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1365,24 +1357,20 @@ STAGE PLANS:
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: key (type: string)
-                      outputColumnNames: _col0
+                    Group By Operator
+                      aggregations: min(key)
+                      keys: key (type: string)
+                      mode: hash
+                      outputColumnNames: _col0, _col1
                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: min(_col0)
-                        keys: _col0 (type: string)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: string)
-                          sort order: +
-                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                          tag: -1
-                          value expressions: _col1 (type: string)
-                          auto parallelism: false
+                        tag: -1
+                        value expressions: _col1 (type: string)
+                        auto parallelism: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:


[07/41] hive git commit: HIVE-11821: JDK8 strict build broken for master (Gopal V, reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-11821: JDK8 strict build broken for master (Gopal V, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bc62a46d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bc62a46d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bc62a46d

Branch: refs/heads/llap
Commit: bc62a46d1a19f343fc74319c52f14a812f3c70fb
Parents: c689589
Author: Gopal V <go...@apache.org>
Authored: Tue Sep 15 17:21:36 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Tue Sep 15 17:21:50 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bc62a46d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
index 8ea1879..c357329 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.optimizer;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -350,8 +351,9 @@ public class ConvertJoinMapJoin implements NodeProcessor {
       // each side better have 0 or more RS. if either side is unbalanced, cannot convert.
       // This is a workaround for now. Right fix would be to refactor code in the
       // MapRecordProcessor and ReduceRecordProcessor with respect to the sources.
+      @SuppressWarnings({"rawtypes","unchecked"})
       Set<ReduceSinkOperator> set =
-          OperatorUtils.findOperatorsUpstream(parentOp.getParentOperators(),
+          OperatorUtils.findOperatorsUpstream((Collection)parentOp.getParentOperators(),
               ReduceSinkOperator.class);
       if (size < 0) {
         size = set.size();
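
For context, here is a minimal, self-contained sketch of the generics friction the raw Collection cast above works around. None of the names below come from Hive: Desc, Op, ReduceSink and findUpstream are stand-ins, and the real signature of OperatorUtils.findOperatorsUpstream may differ. The shape of the call site is the same, though: a list typed with a bounded wildcard is handed to a method whose parameter uses a different wildcard for the element type, so the argument is erased to a raw Collection and the resulting rawtypes/unchecked warnings are suppressed.

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy stand-ins; Hive's real Operator/OperatorDesc hierarchy is much richer.
class Desc {}
class Op<T extends Desc> {}
class ReduceSink extends Op<Desc> {}

public class RawCollectionCastSketch {

  // A lookup in the spirit of OperatorUtils.findOperatorsUpstream (the real
  // signature may differ): collect every element of the requested class.
  static <T> Set<T> findUpstream(Collection<Op<?>> ops, Class<T> clazz) {
    Set<T> result = new HashSet<T>();
    for (Op<?> op : ops) {
      if (clazz.isInstance(op)) {
        result.add(clazz.cast(op));   // safe: guarded by isInstance
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // Parent operator lists are typed with a bounded wildcard, roughly:
    List<Op<? extends Desc>> parents = new ArrayList<Op<? extends Desc>>();
    parents.add(new ReduceSink());

    // Passing `parents` directly does not type-check: generics are invariant,
    // and Op<? extends Desc> is not the same element type as Op<?>. Erasing
    // the argument to a raw Collection compiles with rawtypes/unchecked
    // warnings, which the annotation suppresses; this mirrors the patch above.
    @SuppressWarnings({"rawtypes", "unchecked"})
    Set<ReduceSink> sinks = findUpstream((Collection) parents, ReduceSink.class);

    System.out.println(sinks.size());   // prints 1
  }
}

The one-line cast keeps the method reusable for differently-wildcarded operator lists without touching its signature, which is why the commit calls it a workaround rather than the right fix.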


[22/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
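
Before the diff continues, a toy sketch of the rewrite visible throughout this commit's plan diffs: a Select Operator that only forwards and renames columns (key becomes _col0, value becomes _col1) sits under a Group By Operator, and the merge resolves the group keys and aggregate arguments through that projection so they reference the base columns directly, after which the Select can be dropped. The classes and maps below are invented for illustration only; this is not how AggregateProjectMergeRule is implemented in Calcite or Hive.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class AggregateProjectMergeSketch {

  // Rewrite one column reference through the projection's output -> input map.
  static String resolve(String col, Map<String, String> project) {
    return project.containsKey(col) ? project.get(col) : col;
  }

  public static void main(String[] args) {
    // The Select Operator below the Group By: _col0 <- key, _col1 <- value.
    Map<String, String> project = new LinkedHashMap<String, String>();
    project.put("_col0", "key");
    project.put("_col1", "value");

    // The Group By as planned before the merge (cf. the "-" lines in the hunks).
    List<String> keys = Arrays.asList("_col0");
    Map<String, String> aggCalls = new LinkedHashMap<String, String>();
    aggCalls.put("count", "_col1");

    // The merge: every reference is rewritten to the projection's input
    // column, after which the Select Operator is redundant and can be removed.
    List<String> mergedKeys = new ArrayList<String>();
    for (String k : keys) {
      mergedKeys.add(resolve(k, project));
    }
    Map<String, String> mergedCalls = new LinkedHashMap<String, String>();
    for (Map.Entry<String, String> e : aggCalls.entrySet()) {
      mergedCalls.put(e.getKey(), resolve(e.getValue(), project));
    }

    System.out.println("keys: " + mergedKeys + ", aggregations: " + mergedCalls);
    // prints: keys: [key], aggregations: {count=value}  (cf. the "+" lines)
  }
}

Running it prints the after-state of the plans: keys and aggregations expressed over key/value instead of _col0/_col1, matching the hunks above and below.
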
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/multiMapJoin2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multiMapJoin2.q.out b/ql/src/test/results/clientpositive/multiMapJoin2.q.out
index 2cbef2e..46b717f 100644
--- a/ql/src/test/results/clientpositive/multiMapJoin2.q.out
+++ b/ql/src/test/results/clientpositive/multiMapJoin2.q.out
@@ -527,10 +527,10 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -761,11 +761,11 @@ STAGE PLANS:
   Stage: Stage-17
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 
+        $hdt$_0:$hdt$_1:y1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 
+        $hdt$_0:$hdt$_1:y1 
           TableScan
             alias: y1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -833,11 +833,11 @@ STAGE PLANS:
   Stage: Stage-15
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME1 
+        $INTNAME1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME1 
+        $INTNAME1 
           TableScan
             HashTable Sink Operator
               keys:
@@ -919,11 +919,11 @@ STAGE PLANS:
   Stage: Stage-16
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME 
+        $INTNAME 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME 
+        $INTNAME 
           TableScan
             HashTable Sink Operator
               keys:
@@ -997,11 +997,11 @@ STAGE PLANS:
   Stage: Stage-18
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 
+        $hdt$_1:$hdt$_2:y1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 
+        $hdt$_1:$hdt$_2:y1 
           TableScan
             alias: y1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1163,14 +1163,14 @@ STAGE PLANS:
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 
+        $hdt$_0:$hdt$_1:y1 
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 
+        $hdt$_1:$hdt$_2:y1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 
+        $hdt$_0:$hdt$_1:y1 
           TableScan
             alias: y1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1185,7 +1185,7 @@ STAGE PLANS:
                   keys:
                     0 _col0 (type: string)
                     1 _col0 (type: string)
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 
+        $hdt$_1:$hdt$_2:y1 
           TableScan
             alias: y1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1459,20 +1459,16 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string)
@@ -1492,11 +1488,11 @@ STAGE PLANS:
   Stage: Stage-12
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME1 
+        $INTNAME1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME1 
+        $INTNAME1 
           TableScan
             HashTable Sink Operator
               keys:
@@ -1515,22 +1511,18 @@ STAGE PLANS:
                 1 _col0 (type: string)
               outputColumnNames: _col1
               Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col1 (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: _col1 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Local Work:
         Map Reduce Local Work
 
@@ -1582,11 +1574,11 @@ STAGE PLANS:
   Stage: Stage-13
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME 
+        $INTNAME 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME 
+        $INTNAME 
           TableScan
             HashTable Sink Operator
               keys:
@@ -1605,22 +1597,18 @@ STAGE PLANS:
                 1 _col0 (type: string)
               outputColumnNames: _col1
               Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col1 (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: _col1 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                File Output Operator
+                  compressed: false
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
       Local Work:
         Map Reduce Local Work
 
@@ -1648,31 +1636,27 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col1
           Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: string)
-            outputColumnNames: _col0
+          Group By Operator
+            aggregations: count()
+            keys: _col1 (type: string)
+            mode: hash
+            outputColumnNames: _col0, _col1
             Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: count()
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-14
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1 
+        $hdt$_0:$hdt$_1:x1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1 
+        $hdt$_0:$hdt$_1:x1 
           TableScan
             alias: x1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1833,11 +1817,11 @@ STAGE PLANS:
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1 
+        $hdt$_0:$hdt$_1:x1 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1 
+        $hdt$_0:$hdt$_1:x1 
           TableScan
             alias: x1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1862,20 +1846,16 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: x2
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1924,24 +1904,20 @@ STAGE PLANS:
                   1 _col0 (type: string)
                 outputColumnNames: _col1
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: string)
-                  outputColumnNames: _col0
+                Mux Operator
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Mux Operator
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: _col0 (type: string)
-                      mode: complete
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  Group By Operator
+                    aggregations: count()
+                    keys: _col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
           Group By Operator
             keys: KEY._col0 (type: string)
             mode: mergepartial
@@ -1957,24 +1933,20 @@ STAGE PLANS:
                   1 _col0 (type: string)
                 outputColumnNames: _col1
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col1 (type: string)
-                  outputColumnNames: _col0
+                Mux Operator
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Mux Operator
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: count()
-                      keys: _col0 (type: string)
-                      mode: complete
-                      outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      File Output Operator
-                        compressed: false
-                        table:
-                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                  Group By Operator
+                    aggregations: count()
+                    keys: _col1 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
     Map Reduce
@@ -2228,11 +2200,11 @@ STAGE PLANS:
   Stage: Stage-15
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a 
+        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:a 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a 
+        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:a 
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2380,11 +2352,11 @@ STAGE PLANS:
   Stage: Stage-16
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a 
+        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:a 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a 
+        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:a 
           TableScan
             alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
index feadd5a..9a24ad8 100644
--- a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
+++ b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
@@ -37,7 +37,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- This test query is introduced for HIVE-4968.
 -- First, we do not convert the join to MapJoin.
 EXPLAIN
@@ -140,7 +140,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join JOIN[12][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
       FROM (SELECT *
@@ -188,7 +188,7 @@ POSTHOOK: Input: default@src1
 406	val_406	25
 66	val_66	25
 98	val_98	25
-Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- Then, we convert the join to MapJoin.
 EXPLAIN
 SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
@@ -296,7 +296,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count
 FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count
       FROM (SELECT *

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/nonmr_fetch.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
index a4ce905..9652d7e 100644
--- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out
+++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out
@@ -974,11 +974,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1028,10 +1028,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1081,10 +1081,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/partition_multilevels.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_multilevels.q.out b/ql/src/test/results/clientpositive/partition_multilevels.q.out
index 31862e1..c1c8778 100644
--- a/ql/src/test/results/clientpositive/partition_multilevels.q.out
+++ b/ql/src/test/results/clientpositive/partition_multilevels.q.out
@@ -991,11 +991,11 @@ STAGE PLANS:
             Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: '2222' (type: string), level2 (type: string), level3 (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: level1, level2, level3
               Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                keys: level1 (type: string), level2 (type: string), level3 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
@@ -1579,11 +1579,11 @@ STAGE PLANS:
             Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: '2222' (type: string), level2 (type: string), level3 (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: level1, level2, level3
               Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string), _col1 (type: string), _col2 (type: string)
+                keys: level1 (type: string), level2 (type: string), level3 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ppd_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby.q.out b/ql/src/test/results/clientpositive/ppd_gby.q.out
index 87cb907..6f8ee53 100644
--- a/ql/src/test/results/clientpositive/ppd_gby.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby.q.out
@@ -33,11 +33,11 @@ STAGE PLANS:
                   Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col1 (type: string), _col0 (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: _col1, _col0
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(_col0)
+                      keys: _col1 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -246,11 +246,11 @@ STAGE PLANS:
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: string), key (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col0
                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: count(_col0)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ppd_gby2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_gby2.q.out b/ql/src/test/results/clientpositive/ppd_gby2.q.out
index bc00149..5fbe70f 100644
--- a/ql/src/test/results/clientpositive/ppd_gby2.q.out
+++ b/ql/src/test/results/clientpositive/ppd_gby2.q.out
@@ -36,11 +36,11 @@ STAGE PLANS:
                   Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col1 (type: string), _col0 (type: string)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: _col1, _col0
                     Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col1)
-                      keys: _col0 (type: string)
+                      aggregations: count(_col0)
+                      keys: _col1 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 18 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -60,22 +60,18 @@ STAGE PLANS:
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
             Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: bigint), _col0 (type: string)
+            Group By Operator
+              aggregations: max(_col0)
+              keys: _col1 (type: bigint)
+              mode: hash
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: max(_col1)
-                keys: _col0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 6 Data size: 63 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -164,11 +160,11 @@ STAGE PLANS:
               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: value (type: string), key (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col0
                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: count(_col0)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
@@ -188,22 +184,18 @@ STAGE PLANS:
           Filter Operator
             predicate: ((_col1 > 30) or (_col0 < 'val_400')) (type: boolean)
             Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col1 (type: bigint), _col0 (type: string)
+            Group By Operator
+              aggregations: max(_col0)
+              keys: _col1 (type: bigint)
+              mode: hash
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
-              Group By Operator
-                aggregations: max(_col1)
-                keys: _col0 (type: bigint)
-                mode: hash
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 18 Data size: 190 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ppd_join_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/ppd_join_filter.q.out
index e99986f..1781fe9 100644
--- a/ql/src/test/results/clientpositive/ppd_join_filter.q.out
+++ b/ql/src/test/results/clientpositive/ppd_join_filter.q.out
@@ -136,24 +136,20 @@ STAGE PLANS:
               isSamplingPred: false
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: min(key)
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: min(_col0)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    value expressions: _col1 (type: string)
-                    auto parallelism: false
+                  tag: -1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -202,7 +198,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:a]
+        /src [$hdt$_1:a]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -561,24 +557,20 @@ STAGE PLANS:
               isSamplingPred: false
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: min(key)
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: min(_col0)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    value expressions: _col1 (type: string)
-                    auto parallelism: false
+                  tag: -1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -627,7 +619,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:a]
+        /src [$hdt$_1:a]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -976,11 +968,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col0)
-                keys: _col0 (type: string)
+                aggregations: min(key)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1040,7 +1032,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:a]
+        /src [$hdt$_1:a]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -1395,24 +1387,20 @@ STAGE PLANS:
               isSamplingPred: false
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: min(key)
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: min(_col0)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    tag: -1
-                    value expressions: _col1 (type: string)
-                    auto parallelism: false
+                  tag: -1
+                  value expressions: _col1 (type: string)
+                  auto parallelism: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -1461,7 +1449,7 @@ STAGE PLANS:
               name: default.src
             name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_1:$hdt$_1:a]
+        /src [$hdt$_1:a]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
index 7d1aff2..6f00df9 100644
--- a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
+++ b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
@@ -92,11 +92,11 @@ STAGE PLANS:
             Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_shipdate (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: l_shipdate
               Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
-                keys: _col0 (type: string)
+                aggregations: count(l_shipdate)
+                keys: l_shipdate (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
@@ -253,15 +253,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__lineitem_ix_lineitem_ix_lshipdate_idx__
+            alias: default__lineitem_ix_lineitem_ix_lshipdate_idx__
             Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
-              outputColumnNames: _col0, _count_of_l_shipdate
+              outputColumnNames: l_shipdate, _count_of_l_shipdate
               Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_l_shipdate)
-                keys: _col0 (type: string)
+                keys: l_shipdate (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
@@ -900,15 +900,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: null-subquery1:$hdt$_0-subquery1:$hdt$_0:default__lineitem_ix_lineitem_ix_lshipdate_idx__
+            alias: null-subquery1:$hdt$_0-subquery1:default__lineitem_ix_lineitem_ix_lshipdate_idx__
             Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
-              outputColumnNames: _col0, _count_of_l_shipdate
+              outputColumnNames: l_shipdate, _count_of_l_shipdate
               Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_l_shipdate)
-                keys: _col0 (type: string)
+                keys: l_shipdate (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
@@ -1015,11 +1015,11 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: 1 (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col0)
-                  keys: _col0 (type: int)
+                  aggregations: count(key)
+                  keys: key (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1063,15 +1063,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__tbl_tbl_key_idx__
+            alias: default__tbl_tbl_key_idx__
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int), _count_of_key (type: bigint)
-              outputColumnNames: _col0, _count_of_key
+              outputColumnNames: key, _count_of_key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_key)
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1161,11 +1161,11 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__tbl_tbl_key_idx__
+            alias: default__tbl_tbl_key_idx__
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int), _count_of_key (type: bigint)
-              outputColumnNames: _col0, _count_of_key
+              outputColumnNames: key, _count_of_key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_key)
@@ -1213,10 +1213,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1261,11 +1261,11 @@ STAGE PLANS:
             alias: tbl
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: value (type: int), key (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: int), value (type: int)
+              outputColumnNames: key, value
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int), _col1 (type: int)
+                keys: key (type: int), value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1281,7 +1281,7 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Select Operator
-            expressions: _col1 (type: int)
+            expressions: _col0 (type: int)
             outputColumnNames: _col0
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             File Output Operator
@@ -1318,10 +1318,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: 3 (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int)
+                  keys: key (type: int)
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1370,10 +1370,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: key
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int)
+                  keys: key (type: int)
                   mode: hash
                   outputColumnNames: _col0
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1471,11 +1471,11 @@ STAGE PLANS:
             alias: tbl
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
-              expressions: value (type: int), key (type: int)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: int), value (type: int)
+              outputColumnNames: key, value
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int), _col1 (type: int)
+                keys: key (type: int), value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1490,17 +1490,13 @@ STAGE PLANS:
           mode: mergepartial
           outputColumnNames: _col0, _col1
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-          Select Operator
-            expressions: _col1 (type: int), _col0 (type: int)
-            outputColumnNames: _col0, _col1
+          File Output Operator
+            compressed: false
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              table:
-                  input format: org.apache.hadoop.mapred.TextInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
@@ -1528,10 +1524,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int), 1 (type: int)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), _col1 (type: int)
+                  keys: key (type: int), value (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1577,10 +1573,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1626,10 +1622,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1675,10 +1671,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1724,10 +1720,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                keys: _col0 (type: int), _col1 (type: int)
+                keys: key (type: int), value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1776,10 +1772,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int), 2 (type: int)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), _col1 (type: int)
+                  keys: key (type: int), value (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1828,10 +1824,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: 3 (type: int), 2 (type: int)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), _col1 (type: int)
+                  keys: key (type: int), value (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1878,20 +1874,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (value = key) (type: boolean)
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Select Operator
-                expressions: key (type: int), value (type: int)
+              Group By Operator
+                keys: key (type: int), value (type: int)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int), _col1 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: int), _col1 (type: int)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int), _col1 (type: int)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int), KEY._col1 (type: int)
@@ -2033,10 +2025,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Select Operator
                 expressions: key (type: int), 2 (type: int)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: key, value
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: int), _col1 (type: int)
+                  keys: key (type: int), value (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -2168,22 +2160,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: int)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count(key)
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col0)
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -2260,18 +2248,18 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__tblpart_tbl_part_index__
+            alias: default__tblpart_tbl_part_index__
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (key < 10) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: key (type: int), _count_of_key (type: bigint)
-                outputColumnNames: _col0, _count_of_key
+                outputColumnNames: key, _count_of_key
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: sum(_count_of_key)
-                  keys: _col0 (type: int)
+                  keys: key (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
@@ -2369,11 +2357,11 @@ STAGE PLANS:
             Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
-                keys: _col0 (type: int)
+                aggregations: count(key)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 17 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -2453,15 +2441,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__tbl_tbl_key_idx__
+            alias: default__tbl_tbl_key_idx__
             Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), _count_of_key (type: bigint)
-              outputColumnNames: _col0, _count_of_key
+              outputColumnNames: key, _count_of_key
               Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_key)
-                keys: _col0 (type: int)
+                keys: key (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 430 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
index 3ee2e0f..667c683 100644
--- a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
+++ b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
@@ -176,11 +176,11 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
+            alias: default__lineitem_ix_lineitem_ix_l_orderkey_idx__
             Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_orderkey (type: int), _count_of_l_orderkey (type: bigint)
-              outputColumnNames: _col0, _count_of_l_orderkey
+              outputColumnNames: l_orderkey, _count_of_l_orderkey
               Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_l_orderkey)
@@ -247,11 +247,11 @@ STAGE PLANS:
             Statistics: Num rows: 1512 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_orderkey (type: int), l_partkey (type: int)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: l_orderkey, l_partkey
               Statistics: Num rows: 1512 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0), count(_col1)
-                keys: _col0 (type: int), _col1 (type: int)
+                aggregations: count(l_orderkey), count(l_partkey)
+                keys: l_orderkey (type: int), l_partkey (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1512 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
@@ -428,11 +428,11 @@ STAGE PLANS:
               Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: 7 (type: int)
-                outputColumnNames: _col0
+                outputColumnNames: l_orderkey
                 Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  aggregations: count(_col0)
-                  keys: _col0 (type: int)
+                  aggregations: count(l_orderkey)
+                  keys: l_orderkey (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 1512 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
@@ -1131,11 +1131,11 @@ STAGE PLANS:
             Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_orderkey (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: l_orderkey
               Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0), sum(_col0)
-                keys: _col0 (type: int)
+                aggregations: count(l_orderkey), sum(l_orderkey)
+                keys: l_orderkey (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3024 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
@@ -1223,15 +1223,15 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
+            alias: default__lineitem_ix_lineitem_ix_l_orderkey_idx__
             Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: l_orderkey (type: int), _count_of_l_orderkey (type: bigint)
-              outputColumnNames: _col0, _count_of_l_orderkey
+              outputColumnNames: l_orderkey, _count_of_l_orderkey
               Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_count_of_l_orderkey)
-                keys: _col0 (type: int)
+                keys: l_orderkey (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
@@ -1812,11 +1812,11 @@ STAGE PLANS:
           Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: bigint)
-            outputColumnNames: _col0
+            outputColumnNames: _col1
             Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col0)
-              keys: _col0 (type: bigint)
+              aggregations: count(_col1)
+              keys: _col1 (type: bigint)
               mode: hash
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 756 Data size: 3024 Basic stats: COMPLETE Column stats: NONE
@@ -1907,18 +1907,18 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:$hdt$_0:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
+            alias: $hdt$_0:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
             Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (l_orderkey < 7) (type: boolean)
               Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int), _count_of_l_orderkey (type: bigint)
-                outputColumnNames: _col0, _count_of_l_orderkey
+                outputColumnNames: l_orderkey, _count_of_l_orderkey
                 Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: sum(_count_of_l_orderkey)
-                  keys: _col0 (type: int)
+                  keys: l_orderkey (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
@@ -1937,11 +1937,11 @@ STAGE PLANS:
           Statistics: Num rows: 4 Data size: 400 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col1 (type: bigint)
-            outputColumnNames: _col0
+            outputColumnNames: _col1
             Statistics: Num rows: 4 Data size: 400 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
-              aggregations: count(_col0)
-              keys: _col0 (type: bigint)
+              aggregations: count(_col1)
+              keys: _col1 (type: bigint)
               mode: hash
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 4 Data size: 400 Basic stats: COMPLETE Column stats: NONE
@@ -3404,18 +3404,18 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0-subquery1:$hdt$_0-subquery1:$hdt$_0:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
+            alias: $hdt$_0-subquery1:$hdt$_0-subquery1:default__lineitem_ix_lineitem_ix_l_orderkey_idx__
             Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (l_orderkey < 7) (type: boolean)
               Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_orderkey (type: int), _count_of_l_orderkey (type: bigint)
-                outputColumnNames: _col0, _count_of_l_orderkey
+                outputColumnNames: l_orderkey, _count_of_l_orderkey
                 Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: sum(_count_of_l_orderkey)
-                  keys: _col0 (type: int)
+                  keys: l_orderkey (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 8 Data size: 801 Basic stats: COMPLETE Column stats: NONE
@@ -3495,18 +3495,18 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:default__lineitem_ix_lineitem_ix_l_partkey_idx__
+            alias: $hdt$_0-subquery2:$hdt$_0-subquery2:default__lineitem_ix_lineitem_ix_l_partkey_idx__
             Statistics: Num rows: 100 Data size: 8937 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (l_partkey < 10) (type: boolean)
               Statistics: Num rows: 33 Data size: 2949 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: l_partkey (type: int), _count_of_l_partkey (type: bigint)
-                outputColumnNames: _col0, _count_of_l_partkey
+                outputColumnNames: l_partkey, _count_of_l_partkey
                 Statistics: Num rows: 33 Data size: 2949 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: sum(_count_of_l_partkey)
-                  keys: _col0 (type: int)
+                  keys: l_partkey (type: int)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Statistics: Num rows: 33 Data size: 2949 Basic stats: COMPLETE Column stats: NONE
@@ -3633,20 +3633,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (l_orderkey < 7) (type: boolean)
               Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: l_orderkey (type: int)
+              Group By Operator
+                keys: l_orderkey (type: int)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)
@@ -3717,20 +3713,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (l_partkey < 10) (type: boolean)
               Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: l_partkey (type: int)
+              Group By Operator
+                keys: l_partkey (type: int)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: int)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: int)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: int)
-                    Statistics: Num rows: 1008 Data size: 4033 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out b/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out
index df16ab2..e3ebee7 100644
--- a/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out
+++ b/ql/src/test/results/clientpositive/reduce_deduplicate_extended.q.out
@@ -184,11 +184,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
-                keys: _col0 (type: string)
+                aggregations: sum(key)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -430,10 +430,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -488,10 +488,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2682,12 +2682,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -2914,12 +2914,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
+                key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -2967,12 +2967,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
+                key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/selectDistinctStar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/selectDistinctStar.q.out b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
index abd6795..3f44580 100644
--- a/ql/src/test/results/clientpositive/selectDistinctStar.q.out
+++ b/ql/src/test/results/clientpositive/selectDistinctStar.q.out
@@ -19,10 +19,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -707,20 +707,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (key < '3') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -2449,10 +2445,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -3133,20 +3129,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (key < '3') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)


[24/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
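
Editor's note on the plan changes in the diffs that follow: they all stem from the same rewrite. With AggregateProjectMergeRule enabled, a Select Operator that only aliased columns to _col0/_col1 is folded into the Group By Operator above it, so the aggregation keys and arguments now reference the underlying table columns (key, value, a, b, ...) directly, and in several plans an entire Select/Group By level disappears. As a rough, hedged illustration only (not Hive's actual CalcitePlanner wiring), the Calcite rule can be exercised on a RelNode tree with a HepPlanner; the class and field names below reflect the Calcite releases of that era and should be treated as assumptions:

    import org.apache.calcite.plan.hep.HepPlanner;
    import org.apache.calcite.plan.hep.HepProgram;
    import org.apache.calcite.plan.hep.HepProgramBuilder;
    import org.apache.calcite.rel.RelNode;
    import org.apache.calcite.rel.rules.AggregateProjectMergeRule;

    public final class AggregateProjectMergeExample {
      /** Applies the merge rule to a logical plan and returns the rewritten tree. */
      public static RelNode mergeAggregateOverProject(RelNode logicalPlan) {
        HepProgram program = new HepProgramBuilder()
            // Rewrites Aggregate(Project(x)) into Aggregate(x), remapping group keys
            // and aggregate call arguments onto the Project's own inputs.
            .addRuleInstance(AggregateProjectMergeRule.INSTANCE)
            .build();
        HepPlanner planner = new HepPlanner(program);
        planner.setRoot(logicalPlan);
        // Running the HEP program yields the merged plan; this remapping is why the
        // .q.out plans below key on "key"/"value" instead of the _col0/_col1 aliases.
        return planner.findBestExp();
      }
    }
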
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out b/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
index 6b003d5..2d72250 100644
--- a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
+++ b/ql/src/test/results/clientpositive/correlationoptimizer6.q.out
@@ -2650,22 +2650,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -2706,22 +2702,18 @@ STAGE PLANS:
             1 _col0 (type: string)
           outputColumnNames: _col0, _col1, _col3
           Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: string), _col1 (type: bigint), _col3 (type: bigint)
+          Group By Operator
+            aggregations: sum(_col1), sum(_col3)
+            keys: _col0 (type: string)
+            mode: hash
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-            Group By Operator
-              aggregations: sum(_col1), sum(_col2)
-              keys: _col0 (type: string)
-              mode: hash
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-3
     Map Reduce
@@ -2803,22 +2795,18 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -2930,44 +2918,36 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
           TableScan
             alias: y
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
           TableScan
             alias: x
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -3003,39 +2983,35 @@ STAGE PLANS:
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: bigint), _col3 (type: bigint)
-                  outputColumnNames: _col0, _col1, _col2
+                Mux Operator
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Mux Operator
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: sum(_col1), sum(_col2)
-                      keys: _col0 (type: string)
-                      mode: complete
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      Mux Operator
-                        Statistics: Num rows: 514 Data size: 5411 Basic stats: COMPLETE Column stats: NONE
-                        Join Operator
-                          condition map:
-                               Inner Join 0 to 1
-                          keys:
-                            0 _col0 (type: string)
-                            1 _col0 (type: string)
-                          outputColumnNames: _col0, _col1, _col2, _col4
+                  Group By Operator
+                    aggregations: sum(_col1), sum(_col3)
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    Mux Operator
+                      Statistics: Num rows: 514 Data size: 5411 Basic stats: COMPLETE Column stats: NONE
+                      Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col0, _col1, _col2, _col4
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: bigint), _col0 (type: string), _col4 (type: string)
+                          outputColumnNames: _col0, _col1, _col2, _col3, _col4
                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                          Select Operator
-                            expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: bigint), _col0 (type: string), _col4 (type: string)
-                            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                          File Output Operator
+                            compressed: false
                             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                            File Output Operator
-                              compressed: false
-                              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                              table:
-                                  input format: org.apache.hadoop.mapred.TextInputFormat
-                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Group By Operator
             aggregations: count(VALUE._col0)
             keys: KEY._col0 (type: string)
@@ -3052,39 +3028,35 @@ STAGE PLANS:
                   1 _col0 (type: string)
                 outputColumnNames: _col0, _col1, _col3
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: string), _col1 (type: bigint), _col3 (type: bigint)
-                  outputColumnNames: _col0, _col1, _col2
+                Mux Operator
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                  Mux Operator
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                    Group By Operator
-                      aggregations: sum(_col1), sum(_col2)
-                      keys: _col0 (type: string)
-                      mode: complete
-                      outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                      Mux Operator
-                        Statistics: Num rows: 514 Data size: 5411 Basic stats: COMPLETE Column stats: NONE
-                        Join Operator
-                          condition map:
-                               Inner Join 0 to 1
-                          keys:
-                            0 _col0 (type: string)
-                            1 _col0 (type: string)
-                          outputColumnNames: _col0, _col1, _col2, _col4
+                  Group By Operator
+                    aggregations: sum(_col1), sum(_col3)
+                    keys: _col0 (type: string)
+                    mode: complete
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                    Mux Operator
+                      Statistics: Num rows: 514 Data size: 5411 Basic stats: COMPLETE Column stats: NONE
+                      Join Operator
+                        condition map:
+                             Inner Join 0 to 1
+                        keys:
+                          0 _col0 (type: string)
+                          1 _col0 (type: string)
+                        outputColumnNames: _col0, _col1, _col2, _col4
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Select Operator
+                          expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: bigint), _col0 (type: string), _col4 (type: string)
+                          outputColumnNames: _col0, _col1, _col2, _col3, _col4
                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                          Select Operator
-                            expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: bigint), _col0 (type: string), _col4 (type: string)
-                            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                          File Output Operator
+                            compressed: false
                             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                            File Output Operator
-                              compressed: false
-                              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                              table:
-                                  input format: org.apache.hadoop.mapred.TextInputFormat
-                                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            table:
+                                input format: org.apache.hadoop.mapred.TextInputFormat
+                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           Mux Operator
             Statistics: Num rows: 514 Data size: 5411 Basic stats: COMPLETE Column stats: NONE
             Join Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/count.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/count.q.out b/ql/src/test/results/clientpositive/count.q.out
index e8d0cb3..c744b4f 100644
--- a/ql/src/test/results/clientpositive/count.q.out
+++ b/ql/src/test/results/clientpositive/count.q.out
@@ -48,11 +48,11 @@ STAGE PLANS:
             Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: a, b, c, d
               Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1), count(DISTINCT _col2), sum(_col3)
-                keys: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
+                keys: a (type: int), b (type: int), c (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
@@ -171,14 +171,14 @@ STAGE PLANS:
             Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: int), b (type: int), c (type: int), d (type: int)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: a, b, c, d
               Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
+                key expressions: a (type: int), b (type: int), c (type: int)
                 sort order: +++
-                Map-reduce partition columns: _col0 (type: int)
+                Map-reduce partition columns: a (type: int)
                 Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col3 (type: int)
+                value expressions: d (type: int)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col1:0._col0), count(DISTINCT KEY._col1:1._col0), sum(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 232d505..2a70faf 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -692,11 +692,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(value)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1125,22 +1125,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) < 9.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: count(value)
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count(_col1)
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: bigint)
+                  value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
@@ -1256,22 +1252,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) < 9.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: max(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: string)
+                  value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/database.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/database.q.out b/ql/src/test/results/clientpositive/database.q.out
index 19b0ea2..02eb3d3 100644
--- a/ql/src/test/results/clientpositive/database.q.out
+++ b/ql/src/test/results/clientpositive/database.q.out
@@ -455,7 +455,7 @@ POSTHOOK: query: INSERT OVERWRITE TABLE temp_tbl2 SELECT COUNT(*) FROM temp_tbl
 POSTHOOK: type: QUERY
 POSTHOOK: Input: to_drop_db2@temp_tbl
 POSTHOOK: Output: to_drop_db2@temp_tbl2
-POSTHOOK: Lineage: temp_tbl2.c EXPRESSION []
+POSTHOOK: Lineage: temp_tbl2.c EXPRESSION [(temp_tbl)temp_tbl.null, ]
 PREHOOK: query: USE default
 PREHOOK: type: SWITCHDATABASE
 PREHOOK: Input: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/decimal_precision.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out
index 8d67e2a..69a6045 100644
--- a/ql/src/test/results/clientpositive/decimal_precision.q.out
+++ b/ql/src/test/results/clientpositive/decimal_precision.q.out
@@ -539,10 +539,10 @@ STAGE PLANS:
             Statistics: Num rows: 23 Data size: 2661 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: dec (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: dec
               Statistics: Num rows: 23 Data size: 2661 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col0), sum(_col0)
+                aggregations: avg(dec), sum(dec)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/decimal_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out
index 338b754..ce1fe3f 100644
--- a/ql/src/test/results/clientpositive/decimal_udf.q.out
+++ b/ql/src/test/results/clientpositive/decimal_udf.q.out
@@ -1291,11 +1291,11 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col1), count(_col1), avg(_col1)
-                keys: _col0 (type: int)
+                aggregations: sum(key), count(key), avg(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
@@ -1878,11 +1878,11 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: stddev(_col1), variance(_col1)
-                keys: _col0 (type: int)
+                aggregations: stddev(key), variance(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
@@ -1957,11 +1957,11 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: int), key (type: decimal(20,10))
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: stddev_samp(_col1), var_samp(_col1)
-                keys: _col0 (type: int)
+                aggregations: stddev_samp(key), var_samp(key)
+                keys: value (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
@@ -2095,10 +2095,10 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: min(_col0)
+                aggregations: min(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2154,10 +2154,10 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
@@ -2213,10 +2213,10 @@ STAGE PLANS:
             Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: decimal(20,10))
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/distinct_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/distinct_stats.q.out b/ql/src/test/results/clientpositive/distinct_stats.q.out
index 36049be..164a0a8 100644
--- a/ql/src/test/results/clientpositive/distinct_stats.q.out
+++ b/ql/src/test/results/clientpositive/distinct_stats.q.out
@@ -43,11 +43,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: a, b
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT b)
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -100,10 +100,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: b (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: b
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: b (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -151,11 +151,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: a
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: a (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
index 216a79c..2b5122f 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out
@@ -1518,35 +1518,35 @@ STAGE PLANS:
             Filter Operator
               predicate: (t is null or (t = 27)) (type: boolean)
               Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+              Group By Operator
+                keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                  sort order: +++++
+                  Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
                   Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    sort order: +++++
-                    Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.over1k_part2_orc
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.over1k_part2_orc
 
   Stage: Stage-0
     Move Operator
@@ -1572,9 +1572,8 @@ explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1586,57 +1585,35 @@ STAGE PLANS:
             Filter Operator
               predicate: (t is null or (t = 27)) (type: boolean)
               Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+              Group By Operator
+                keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                  sort order: +++++
+                  Map-reduce partition columns: _col0 (type: tinyint)
                   Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    sort order: +++++
-                    Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE
       Execution mode: vectorized
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col4 (type: tinyint)
-              sort order: +
-              Map-reduce partition columns: _col4 (type: tinyint)
-              Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.over1k_part2_orc
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.over1k_part2_orc
 
   Stage: Stage-0
     Move Operator
@@ -1651,7 +1628,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.over1k_part2_orc
 
-  Stage: Stage-3
+  Stage: Stage-2
     Stats-Aggr Operator
 
 PREHOOK: query: insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
index 41049bd..4caa587 100644
--- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
+++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out
@@ -1420,34 +1420,34 @@ STAGE PLANS:
             Filter Operator
               predicate: (t is null or (t = 27)) (type: boolean)
               Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+              Group By Operator
+                keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                  sort order: +++++
+                  Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
                   Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    sort order: +++++
-                    Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.over1k_part2
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.over1k_part2
 
   Stage: Stage-0
     Move Operator
@@ -1473,9 +1473,8 @@ explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1487,56 +1486,34 @@ STAGE PLANS:
             Filter Operator
               predicate: (t is null or (t = 27)) (type: boolean)
               Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint)
+              Group By Operator
+                keys: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float)
+                mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Reduce Output Operator
+                  key expressions: _col0 (type: tinyint), _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float)
+                  sort order: +++++
+                  Map-reduce partition columns: _col0 (type: tinyint)
                   Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    sort order: +++++
-                    Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-                    Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
-          keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint)
+          keys: KEY._col0 (type: tinyint), KEY._col1 (type: smallint), KEY._col2 (type: int), KEY._col3 (type: bigint), KEY._col4 (type: float)
           mode: mergepartial
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
           Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col4 (type: tinyint)
-              sort order: +
-              Map-reduce partition columns: _col4 (type: tinyint)
-              Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: smallint), VALUE._col1 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: tinyint)
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
+          Select Operator
+            expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
             Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.over1k_part2
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.over1k_part2
 
   Stage: Stage-0
     Move Operator
@@ -1551,7 +1528,7 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.over1k_part2
 
-  Stage: Stage-3
+  Stage: Stage-2
     Stats-Aggr Operator
 
 PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out
index 5d4d2af..c57a5ad 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_select_read_only_encrypted_tbl.q.out
@@ -29,11 +29,11 @@ POSTHOOK: Output: default@encrypted_table
 PREHOOK: query: SELECT count(*) FROM encrypted_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@encrypted_table
-#### A masked pattern was here ####
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging
 POSTHOOK: query: SELECT count(*) FROM encrypted_table
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@encrypted_table
-#### A masked pattern was here ####
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table/.hive-staging
 500
 PREHOOK: query: drop table encrypted_table PURGE
 PREHOOK: type: DROPTABLE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/explain_logical.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out
index 545034a..8fa0a4c 100644
--- a/ql/src/test/results/clientpositive/explain_logical.q.out
+++ b/ql/src/test/results/clientpositive/explain_logical.q.out
@@ -804,20 +804,20 @@ $hdt$_0:src
   TableScan (TS_0)
     alias: src
     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_18)
+    Filter Operator (FIL_17)
       predicate: key is not null (type: boolean)
       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Select Operator (SEL_1)
         expressions: key (type: string), value (type: string)
         outputColumnNames: _col0, _col1
         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_10)
+        Reduce Output Operator (RS_9)
           key expressions: _col0 (type: string)
           sort order: +
           Map-reduce partition columns: _col0 (type: string)
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           value expressions: _col1 (type: string)
-          Join Operator (JOIN_13)
+          Join Operator (JOIN_12)
             condition map:
                  Inner Join 0 to 1
             keys:
@@ -825,67 +825,63 @@ $hdt$_0:src
               1 _col0 (type: string)
             outputColumnNames: _col0, _col1, _col3
             Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            Select Operator (SEL_14)
+            Select Operator (SEL_13)
               expressions: _col0 (type: string), _col3 (type: bigint), _col1 (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator (RS_15)
+              Reduce Output Operator (RS_14)
                 key expressions: _col0 (type: string)
                 sort order: +
                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: bigint), _col2 (type: string)
-                Select Operator (SEL_16)
+                Select Operator (SEL_15)
                   expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: string)
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator (FS_17)
+                  File Output Operator (FS_16)
                     compressed: false
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_1:$hdt$_1:src 
+$hdt$_1:src 
   TableScan (TS_2)
     alias: src
     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_19)
+    Filter Operator (FIL_18)
       predicate: key is not null (type: boolean)
       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-      Select Operator (SEL_3)
-        expressions: key (type: string), value (type: string)
+      Group By Operator (GBY_4)
+        aggregations: count(value)
+        keys: key (type: string)
+        mode: hash
         outputColumnNames: _col0, _col1
         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Group By Operator (GBY_5)
-          aggregations: count(_col1)
-          keys: _col0 (type: string)
-          mode: hash
-          outputColumnNames: _col0, _col1
+        Reduce Output Operator (RS_5)
+          key expressions: _col0 (type: string)
+          sort order: +
+          Map-reduce partition columns: _col0 (type: string)
           Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Reduce Output Operator (RS_6)
-            key expressions: _col0 (type: string)
-            sort order: +
-            Map-reduce partition columns: _col0 (type: string)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            value expressions: _col1 (type: bigint)
-            Group By Operator (GBY_7)
-              aggregations: count(VALUE._col0)
-              keys: KEY._col0 (type: string)
-              mode: mergepartial
-              outputColumnNames: _col0, _col1
+          value expressions: _col1 (type: bigint)
+          Group By Operator (GBY_6)
+            aggregations: count(VALUE._col0)
+            keys: KEY._col0 (type: string)
+            mode: mergepartial
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+            Reduce Output Operator (RS_11)
+              key expressions: _col0 (type: string)
+              sort order: +
+              Map-reduce partition columns: _col0 (type: string)
               Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator (RS_12)
-                key expressions: _col0 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col1 (type: bigint)
-                Join Operator (JOIN_13)
-                  condition map:
-                       Inner Join 0 to 1
-                  keys:
-                    0 _col0 (type: string)
-                    1 _col0 (type: string)
-                  outputColumnNames: _col0, _col1, _col3
-                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col1 (type: bigint)
+              Join Operator (JOIN_12)
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col0, _col1, _col3
+                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/fetch_aggregation.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/fetch_aggregation.q.out b/ql/src/test/results/clientpositive/fetch_aggregation.q.out
index 121d6a4..91f47f8 100644
--- a/ql/src/test/results/clientpositive/fetch_aggregation.q.out
+++ b/ql/src/test/results/clientpositive/fetch_aggregation.q.out
@@ -17,10 +17,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0), sum(_col0), avg(_col0), min(_col0), max(_col0), std(_col0), variance(_col0)
+                aggregations: count(key), sum(key), avg(key), min(key), max(key), std(key), variance(key)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                 Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/gby_star.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/gby_star.q.out b/ql/src/test/results/clientpositive/gby_star.q.out
index a49f1bf..fb71835 100644
--- a/ql/src/test/results/clientpositive/gby_star.q.out
+++ b/ql/src/test/results/clientpositive/gby_star.q.out
@@ -17,11 +17,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: sum(key)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -93,22 +93,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) < 100.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
-                outputColumnNames: _col0, _col1
+              Group By Operator
+                aggregations: sum(key)
+                keys: key (type: string), value (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: sum(_col0)
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1, _col2
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col2 (type: double)
+                  value expressions: _col2 (type: double)
       Reduce Operator Tree:
         Group By Operator
           aggregations: sum(VALUE._col0)
@@ -171,22 +167,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) < 100.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: sum(key)
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: sum(_col0)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: double)
+                  value expressions: _col1 (type: double)
       Reduce Operator Tree:
         Group By Operator
           aggregations: sum(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby12.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby12.q.out b/ql/src/test/results/clientpositive/groupby12.q.out
index 6eb3e96..b17da54 100644
--- a/ql/src/test/results/clientpositive/groupby12.q.out
+++ b/ql/src/test/results/clientpositive/groupby12.q.out
@@ -28,12 +28,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
+                key expressions: key (type: string), value (type: string)
                 sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby5_map.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_map.q.out b/ql/src/test/results/clientpositive/groupby5_map.q.out
index ea8ce4a..5fbd3d7 100644
--- a/ql/src/test/results/clientpositive/groupby5_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map.q.out
@@ -26,10 +26,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
index c0fced7..60b010b 100644
--- a/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby5_map_skew.q.out
@@ -26,10 +26,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col0)
+                aggregations: sum(key)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_cube1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out
index 62e9c54..f5a2d1c 100644
--- a/ql/src/test/results/clientpositive/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out
@@ -206,11 +206,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, val
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT val)
+                keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
@@ -389,11 +389,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, val
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT val)
+                keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out
index bd92927..b7121d5 100644
--- a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out
+++ b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out
@@ -115,11 +115,11 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: int1 (type: int)
-              outputColumnNames: _col0
+              outputColumnNames: int1
               Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(DISTINCT _col0)
-                keys: _col0 (type: int)
+                aggregations: sum(DISTINCT int1)
+                keys: int1 (type: int)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 8 Data size: 120 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
index 6d44f32..6f9237d 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
@@ -36,11 +36,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: a, b
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
@@ -143,11 +143,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: a, b, c
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col2)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: sum(c)
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
index 5d1a7a8..d3e6bee 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
@@ -55,11 +55,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: a, b, c
               Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col2), count()
-                keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                aggregations: avg(c), count()
+                keys: a (type: string), b (type: string), '0' (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 4 Data size: 288 Basic stats: COMPLETE Column stats: NONE
@@ -143,11 +143,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string), c (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: a, b, c
               Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: avg(_col2), count()
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: avg(c), count()
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 72 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
index bd7c2ff..e022297 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
@@ -42,10 +42,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: a, b
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
@@ -159,10 +159,10 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: a (type: string), b (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: a, b
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string), _col1 (type: string)
+                keys: a (type: string), b (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
index d807d44..1d7b569 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out
@@ -42,10 +42,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: '5' (type: string), b (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: a, b
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                  keys: a (type: string), b (type: string), '0' (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 2 Data size: 72 Basic stats: COMPLETE Column stats: NONE
@@ -117,10 +117,10 @@ STAGE PLANS:
               Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: '5' (type: string), b (type: string)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: a, b
                 Statistics: Num rows: 1 Data size: 36 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string), '0' (type: string)
+                  keys: a (type: string), b (type: string), '0' (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1, _col2
                   Statistics: Num rows: 2 Data size: 72 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_position.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out
index 7152101..3472134 100644
--- a/ql/src/test/results/clientpositive/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/groupby_position.q.out
@@ -563,20 +563,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (((((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) and (UDFToDouble(key) > 15.0)) and (UDFToDouble(key) < 25.0)) and key is not null) (type: boolean)
               Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -653,20 +649,16 @@ STAGE PLANS:
             Filter Operator
               predicate: (((((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) and (UDFToDouble(key) > 10.0)) and (UDFToDouble(key) < 20.0)) and key is not null) (type: boolean)
               Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                keys: key (type: string), value (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string), _col1 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                   Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string), _col1 (type: string)
-                    sort order: ++
-                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                    Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string), KEY._col1 (type: string)


[14/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
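
For readers skimming these golden-file diffs, the recurring pattern is that a Select Operator which only forwarded columns under _colN aliases disappears, and the Group By Operator keys and aggregations reference the underlying column names directly (e.g. sum(_col0) becomes sum(key)). The sketch below is a deliberately simplified, hypothetical illustration of that project-into-aggregate merge; it does not use the real Hive or Calcite classes (the actual rule operates on Calcite RelNode plans inside Hive's CBO), and every class and method name here is invented purely for the example.

// AggregateProjectMergeSketch.java -- minimal, self-contained illustration only.
// NOT the Hive/Calcite API: the classes below are hypothetical stand-ins for a
// column-forwarding Project (Select Operator) and an Aggregate (Group By Operator).
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class AggregateProjectMergeSketch {

  /** A Project that only forwards input columns under aliases (_col0, _col1, ...). */
  static final class Project {
    final List<String> inputColumns;   // names of the columns it reads
    final List<String> outputAliases;  // aliases it exposes to the Aggregate
    Project(List<String> inputColumns, List<String> outputAliases) {
      this.inputColumns = inputColumns;
      this.outputAliases = outputAliases;
    }
  }

  /** An Aggregate whose keys and calls reference the Project's aliases. */
  static final class Aggregate {
    final List<String> groupKeys;
    final List<String> aggCalls;       // e.g. "sum(_col0)"
    Aggregate(List<String> groupKeys, List<String> aggCalls) {
      this.groupKeys = groupKeys;
      this.aggCalls = aggCalls;
    }
  }

  /** Rewrite the Aggregate against the Project's inputs, so the Project can be dropped. */
  static Aggregate merge(Aggregate agg, Project project) {
    List<String> keys = agg.groupKeys.stream()
        .map(k -> resolve(k, project))
        .collect(Collectors.toList());
    List<String> calls = agg.aggCalls.stream()
        .map(c -> {
          int open = c.indexOf('('), close = c.lastIndexOf(')');
          String arg = c.substring(open + 1, close);
          return c.substring(0, open + 1) + resolve(arg, project) + ")";
        })
        .collect(Collectors.toList());
    return new Aggregate(keys, calls);
  }

  // Map an alias back to the underlying column it forwards; leave anything else alone.
  private static String resolve(String alias, Project project) {
    int i = project.outputAliases.indexOf(alias);
    return i >= 0 ? project.inputColumns.get(i) : alias;
  }

  public static void main(String[] args) {
    Project sel = new Project(Arrays.asList("key", "value"),
                              Arrays.asList("_col0", "_col1"));
    Aggregate gby = new Aggregate(Arrays.asList("_col0", "_col1"),
                                  Arrays.asList("sum(_col0)"));
    Aggregate merged = merge(gby, sel);
    // Before: keys [_col0, _col1], aggs [sum(_col0)]
    // After:  keys [key, value],   aggs [sum(key)]  -- matching the diffs above
    System.out.println("keys=" + merged.groupKeys + " aggs=" + merged.aggCalls);
  }
}

The merge is only valid when the Project is a pure column mapping (no computed expressions), which is why the hunks above show it removed exactly in those cases while plans with real expressions keep their Select Operator.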
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out
index af15925..3712b16 100644
--- a/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out
@@ -126,11 +126,11 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: bo (type: boolean), b (type: bigint)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: bo, b
                     Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col1)
-                      keys: _col0 (type: boolean)
+                      aggregations: max(b)
+                      keys: bo (type: boolean)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
index f9159eb..2315cb9 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
@@ -227,19 +227,15 @@ STAGE PLANS:
                           1 Map 4
                         Statistics: Num rows: 24 Data size: 5361 Basic stats: COMPLETE Column stats: NONE
                         HybridGraceHashJoin: true
-                        Select Operator
-                          expressions: _col1 (type: bigint)
-                          outputColumnNames: _col0
-                          Statistics: Num rows: 24 Data size: 5361 Basic stats: COMPLETE Column stats: NONE
-                          Group By Operator
-                            aggregations: count(), sum(_col0)
-                            mode: hash
-                            outputColumnNames: _col0, _col1
+                        Group By Operator
+                          aggregations: count(), sum(_col1)
+                          mode: hash
+                          outputColumnNames: _col0, _col1
+                          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            sort order: 
                             Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                            Reduce Output Operator
-                              sort order: 
-                              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
-                              value expressions: _col0 (type: bigint), _col1 (type: bigint)
+                            value expressions: _col0 (type: bigint), _col1 (type: bigint)
             Execution mode: vectorized
         Map 3 
             Map Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out b/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out
index 65b2ff1..c9ceeb4 100644
--- a/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_partition_diff_num_cols.q.out
@@ -100,10 +100,10 @@ STAGE PLANS:
                   Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: inv_quantity_on_hand
                     Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(inv_quantity_on_hand)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -226,10 +226,10 @@ STAGE PLANS:
                   Statistics: Num rows: 200 Data size: 11876 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: inv_quantity_on_hand
                     Statistics: Num rows: 200 Data size: 11876 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(inv_quantity_on_hand)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -353,10 +353,10 @@ STAGE PLANS:
                   Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: inv_quantity_on_hand
                     Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(inv_quantity_on_hand)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -461,10 +461,10 @@ STAGE PLANS:
                   Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: inv_quantity_on_hand
                     Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(inv_quantity_on_hand)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -575,10 +575,10 @@ STAGE PLANS:
                   Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: inv_quantity_on_hand (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: inv_quantity_on_hand
                     Statistics: Num rows: 200 Data size: 3176 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(inv_quantity_on_hand)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
index 4b45ac1..70d415d 100644
--- a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
@@ -379,11 +379,11 @@ STAGE PLANS:
                   Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: fl_date (type: date)
-                    outputColumnNames: _col0
+                    outputColumnNames: fl_date
                     Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: date)
+                      keys: fl_date (type: date)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 137 Data size: 39456 Basic stats: COMPLETE Column stats: NONE
@@ -1159,11 +1159,11 @@ STAGE PLANS:
                   Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: fl_date (type: date)
-                    outputColumnNames: _col0
+                    outputColumnNames: fl_date
                     Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: date)
+                      keys: fl_date (type: date)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 137 Data size: 31776 Basic stats: COMPLETE Column stats: NONE
@@ -1963,11 +1963,11 @@ STAGE PLANS:
                   Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: fl_time (type: timestamp)
-                    outputColumnNames: _col0
+                    outputColumnNames: fl_time
                     Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count()
-                      keys: _col0 (type: timestamp)
+                      keys: fl_time (type: timestamp)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 137 Data size: 33968 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index 528c2cb..14aa777 100644
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@ -44,22 +44,18 @@ STAGE PLANS:
                   Filter Operator
                     predicate: (cdecimal1 is not null and cdecimal2 is not null) (type: boolean)
                     Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                    Group By Operator
+                      aggregations: min(cdecimal1)
+                      keys: cint (type: int), cdouble (type: double), cdecimal1 (type: decimal(20,10)), cdecimal2 (type: decimal(23,14))
+                      mode: hash
+                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
                       Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                      Group By Operator
-                        aggregations: min(_col2)
-                        keys: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                        mode: hash
-                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
+                        sort order: ++++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
                         Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                          sort order: ++++
-                          Map-reduce partition columns: _col0 (type: int), _col1 (type: double), _col2 (type: decimal(20,10)), _col3 (type: decimal(23,14))
-                          Statistics: Num rows: 1526 Data size: 360136 Basic stats: COMPLETE Column stats: NONE
-                          value expressions: _col4 (type: decimal(20,10))
+                        value expressions: _col4 (type: decimal(20,10))
             Execution mode: vectorized
         Reducer 2 
             Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
index 18e042d..4580fe0 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
@@ -133,10 +133,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctinyint
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(ctinyint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -439,10 +439,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cbigint (type: bigint)
-                    outputColumnNames: _col0
+                    outputColumnNames: cbigint
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(cbigint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -745,10 +745,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cfloat (type: float)
-                    outputColumnNames: _col0
+                    outputColumnNames: cfloat
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: sum(_col0)
+                      aggregations: sum(cfloat)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -998,10 +998,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: cbigint, cfloat, ctinyint
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), stddev_pop(_col0), var_samp(_col0), count(), sum(_col1), min(_col2)
+                        aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
index 6214640..d153dd9 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
@@ -89,19 +89,19 @@ STAGE PLANS:
                     predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                        keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                        aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                        keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           sort order: +++++
-                          Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized
@@ -109,12 +109,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-                keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+                  expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
@@ -342,19 +342,19 @@ STAGE PLANS:
                     predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                     Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      expressions: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cfloat, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col3), stddev_pop(_col3), stddev_pop(_col1), max(_col3), min(_col1)
-                        keys: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                        aggregations: max(ctinyint), sum(cfloat), stddev_pop(cfloat), stddev_pop(ctinyint), max(cfloat), min(ctinyint)
+                        keys: ctinyint (type: tinyint), cfloat (type: float), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          key expressions: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           sort order: +++++
-                          Map-reduce partition columns: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: float), _col2 (type: string), _col3 (type: timestamp), _col4 (type: boolean)
                           Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col5 (type: tinyint), _col6 (type: double), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,variance:double>), _col9 (type: float), _col10 (type: tinyint)
             Execution mode: vectorized
@@ -362,12 +362,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0), sum(VALUE._col1), stddev_pop(VALUE._col2), stddev_pop(VALUE._col3), max(VALUE._col4), min(VALUE._col5)
-                keys: KEY._col0 (type: boolean), KEY._col1 (type: tinyint), KEY._col2 (type: timestamp), KEY._col3 (type: float), KEY._col4 (type: string)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: float), KEY._col2 (type: string), KEY._col3 (type: timestamp), KEY._col4 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                 Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: boolean), _col1 (type: tinyint), _col2 (type: timestamp), _col3 (type: float), _col4 (type: string), (- _col1) (type: tinyint), _col5 (type: tinyint), ((- _col1) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col1) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col3)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col1) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col1) + _col5))) / UDFToDouble(_col1)) (type: double), _col10 (type: tinyint)
+                  expressions: _col4 (type: boolean), _col0 (type: tinyint), _col3 (type: timestamp), _col1 (type: float), _col2 (type: string), (- _col0) (type: tinyint), _col5 (type: tinyint), ((- _col0) + _col5) (type: tinyint), _col6 (type: double), (_col6 * UDFToDouble(((- _col0) + _col5))) (type: double), (- _col6) (type: double), (79.553 * UDFToDouble(_col1)) (type: double), _col7 (type: double), (- _col6) (type: double), _col8 (type: double), (UDFToDouble(((- _col0) + _col5)) - 10.175) (type: double), (- (- _col6)) (type: double), (-26.28 / (- (- _col6))) (type: double), _col9 (type: float), ((_col6 * UDFToDouble(((- _col0) + _col5))) / UDFToDouble(_col0)) (type: double), _col10 (type: tinyint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 1365 Data size: 293479 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
index 1858cb0..c134c0b 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
@@ -85,19 +85,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      expressions: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
+                      outputColumnNames: ctinyint, cint, cfloat, cdouble, cstring1, ctimestamp1, cboolean1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_samp(_col0), min(_col2), stddev_samp(_col4), var_pop(_col4), var_samp(_col5), stddev_pop(_col5)
-                        keys: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                        aggregations: stddev_samp(cfloat), min(cdouble), stddev_samp(ctinyint), var_pop(ctinyint), var_samp(cint), stddev_pop(cint)
+                        keys: ctinyint (type: tinyint), cint (type: int), cfloat (type: float), cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp), cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          key expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                           sort order: +++++++
-                          Map-reduce partition columns: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: tinyint), _col1 (type: int), _col2 (type: float), _col3 (type: double), _col4 (type: string), _col5 (type: timestamp), _col6 (type: boolean)
                           Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: double), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,variance:double>), _col11 (type: struct<count:bigint,sum:double,variance:double>), _col12 (type: struct<count:bigint,sum:double,variance:double>)
             Execution mode: vectorized
@@ -105,12 +105,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_samp(VALUE._col0), min(VALUE._col1), stddev_samp(VALUE._col2), var_pop(VALUE._col3), var_samp(VALUE._col4), stddev_pop(VALUE._col5)
-                keys: KEY._col0 (type: float), KEY._col1 (type: boolean), KEY._col2 (type: double), KEY._col3 (type: string), KEY._col4 (type: tinyint), KEY._col5 (type: int), KEY._col6 (type: timestamp)
+                keys: KEY._col0 (type: tinyint), KEY._col1 (type: int), KEY._col2 (type: float), KEY._col3 (type: double), KEY._col4 (type: string), KEY._col5 (type: timestamp), KEY._col6 (type: boolean)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: float), _col1 (type: boolean), _col2 (type: double), _col3 (type: string), _col4 (type: tinyint), _col5 (type: int), _col6 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col5)) (type: double), _col8 (type: double), (_col2 * 79.553) (type: double), (33.0 % _col0) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col2) (type: double), (- _col4) (type: tinyint), _col11 (type: double), (UDFToFloat(_col5) - _col0) (type: float), (-23 % UDFToInteger(_col4)) (type: int), (- (-26.28 - UDFToDouble(_col5))) (type: double), _col12 (type: double)
+                  expressions: _col2 (type: float), _col6 (type: boolean), _col3 (type: double), _col4 (type: string), _col0 (type: tinyint), _col1 (type: int), _col5 (type: timestamp), _col7 (type: double), (-26.28 - UDFToDouble(_col1)) (type: double), _col8 (type: double), (_col3 * 79.553) (type: double), (33.0 % _col2) (type: float), _col9 (type: double), _col10 (type: double), (-23.0 % _col3) (type: double), (- _col0) (type: tinyint), _col11 (type: double), (UDFToFloat(_col1) - _col2) (type: float), (-23 % UDFToInteger(_col0)) (type: int), (- (-26.28 - UDFToDouble(_col1))) (type: double), _col12 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_16.q.out b/ql/src/test/results/clientpositive/tez/vectorization_16.q.out
index bfed7d1..1b39796 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_16.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_16.q.out
@@ -62,19 +62,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2
+                      expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      outputColumnNames: cdouble, cstring1, ctimestamp1
                       Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                        keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                        aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                        keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           sort order: +++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized
@@ -82,12 +82,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-                keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+                keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+                  expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                   Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_9.q.out b/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
index bfed7d1..1b39796 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_9.q.out
@@ -62,19 +62,19 @@ STAGE PLANS:
                     predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean)
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp)
-                      outputColumnNames: _col0, _col1, _col2
+                      expressions: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
+                      outputColumnNames: cdouble, cstring1, ctimestamp1
                       Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: count(_col1), stddev_samp(_col1), min(_col1)
-                        keys: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                        aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble)
+                        keys: cdouble (type: double), cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          key expressions: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           sort order: +++
-                          Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp)
+                          Map-reduce partition columns: _col0 (type: double), _col1 (type: string), _col2 (type: timestamp)
                           Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col3 (type: bigint), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: double)
             Execution mode: vectorized
@@ -82,12 +82,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2)
-                keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp)
+                keys: KEY._col0 (type: double), KEY._col1 (type: string), KEY._col2 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                 Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
+                  expressions: _col1 (type: string), _col0 (type: double), _col2 (type: timestamp), (_col0 - 9763215.5639) (type: double), (- (_col0 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * UDFToDouble(_col3)) (type: double), _col5 (type: double), (9763215.5639 / _col0) (type: double), (UDFToDouble(_col3) / -1.389) (type: double), _col4 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
                   Statistics: Num rows: 2048 Data size: 440327 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
index 1c5b51f..33f7ed9 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_limit.q.out
@@ -251,10 +251,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctinyint
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint)
+                      keys: ctinyint (type: tinyint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -342,10 +342,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint), cdouble (type: double)
-                    outputColumnNames: _col0, _col1
+                    outputColumnNames: ctinyint, cdouble
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: tinyint), _col1 (type: double)
+                      keys: ctinyint (type: tinyint), cdouble (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -469,11 +469,11 @@ STAGE PLANS:
                     Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: cdouble, ctinyint
                       Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: sum(_col1)
-                        keys: _col0 (type: double)
+                        aggregations: sum(ctinyint)
+                        keys: cdouble (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1
                         Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out
index e203bfd..c9c301e 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out
@@ -25,10 +25,10 @@ STAGE PLANS:
                     Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint)
-                      outputColumnNames: _col0
+                      outputColumnNames: cbigint
                       Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0)
+                        aggregations: avg(cbigint)
                         mode: hash
                         outputColumnNames: _col0
                         Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index 59b457a..99171f2 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -156,10 +156,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cint, cdouble, csmallint, cfloat, ctinyint
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), sum(_col1), stddev_pop(_col0), stddev_samp(_col2), var_samp(_col0), avg(_col3), stddev_samp(_col0), min(_col4), count(_col2)
+                        aggregations: avg(cint), sum(cdouble), stddev_pop(cint), stddev_samp(csmallint), var_samp(cint), avg(cfloat), stddev_samp(cint), min(ctinyint), count(csmallint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE
@@ -368,10 +368,10 @@ STAGE PLANS:
                     Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cint, cbigint, csmallint, cdouble, ctinyint
                       Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col0), var_pop(_col1), stddev_pop(_col2), max(_col3), avg(_col4), min(_col0), min(_col3), stddev_samp(_col2), var_samp(_col0)
+                        aggregations: max(cint), var_pop(cbigint), stddev_pop(csmallint), max(cdouble), avg(ctinyint), min(cint), min(cdouble), stddev_samp(csmallint), var_samp(cint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -571,10 +571,10 @@ STAGE PLANS:
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                      outputColumnNames: cbigint, ctinyint, csmallint, cint, cdouble
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: var_pop(_col0), count(), max(_col1), stddev_pop(_col2), max(_col3), stddev_samp(_col4), count(_col1), avg(_col1)
+                        aggregations: var_pop(cbigint), count(), max(ctinyint), stddev_pop(csmallint), max(cint), stddev_samp(cdouble), count(ctinyint), avg(ctinyint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                         Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -753,10 +753,10 @@ STAGE PLANS:
                     Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
-                      outputColumnNames: _col0, _col1, _col2, _col3
+                      outputColumnNames: ctinyint, cbigint, cint, cfloat
                       Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: avg(_col0), max(_col1), stddev_samp(_col2), var_pop(_col2), var_pop(_col1), max(_col3)
+                        aggregations: avg(ctinyint), max(cbigint), stddev_samp(cint), var_pop(cint), var_pop(cbigint), max(cfloat)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
                         Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE
@@ -1882,11 +1882,11 @@ STAGE PLANS:
                     Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: csmallint (type: smallint), cbigint (type: bigint), ctinyint (type: tinyint)
-                      outputColumnNames: _col0, _col1, _col2
+                      outputColumnNames: csmallint, cbigint, ctinyint
                       Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_samp(_col0), sum(_col1), var_pop(_col2), count()
-                        keys: _col0 (type: smallint)
+                        aggregations: stddev_samp(csmallint), sum(cbigint), var_pop(ctinyint), count()
+                        keys: csmallint (type: smallint)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
                         Statistics: Num rows: 2503 Data size: 538153 Basic stats: COMPLETE Column stats: NONE
@@ -2089,11 +2089,11 @@ STAGE PLANS:
                     Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cdouble (type: double), cfloat (type: float)
-                      outputColumnNames: _col0, _col1
+                      outputColumnNames: cdouble, cfloat
                       Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
-                        keys: _col0 (type: double)
+                        aggregations: var_samp(cdouble), count(cfloat), sum(cfloat), var_pop(cdouble), stddev_pop(cdouble), sum(cdouble)
+                        keys: cdouble (type: double)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                         Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
@@ -2343,19 +2343,19 @@ STAGE PLANS:
                     predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      expressions: cstring1 (type: string), ctimestamp1 (type: timestamp), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
+                      outputColumnNames: cstring1, ctimestamp1, cint, csmallint, ctinyint, cfloat, cdouble
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: stddev_pop(_col2), avg(_col3), count(), min(_col4), var_samp(_col3), var_pop(_col5), avg(_col2), var_samp(_col5), avg(_col5), min(_col6), var_pop(_col3), stddev_pop(_col4), sum(_col2)
-                        keys: _col0 (type: timestamp), _col1 (type: string)
+                        aggregations: stddev_pop(cint), avg(csmallint), count(), min(ctinyint), var_samp(csmallint), var_pop(cfloat), avg(cint), var_samp(cfloat), avg(cfloat), min(cdouble), var_pop(csmallint), stddev_pop(ctinyint), sum(cint)
+                        keys: cstring1 (type: string), ctimestamp1 (type: timestamp)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
-                          key expressions: _col0 (type: timestamp), _col1 (type: string)
+                          key expressions: _col0 (type: string), _col1 (type: timestamp)
                           sort order: ++
-                          Map-reduce partition columns: _col0 (type: timestamp), _col1 (type: string)
+                          Map-reduce partition columns: _col0 (type: string), _col1 (type: timestamp)
                           Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,input:smallint>), _col4 (type: bigint), _col5 (type: tinyint), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: struct<count:bigint,sum:double,variance:double>), _col10 (type: struct<count:bigint,sum:double,input:float>), _col11 (type: double), _col12 (type: struct<count:bigint,sum:double,variance:double>), _col13 (type: struct<count:bigint,sum:double,variance:double>), _col14 (type: bigint)
             Execution mode: vectorized
@@ -2363,12 +2363,12 @@ STAGE PLANS:
             Reduce Operator Tree:
               Group By Operator
                 aggregations: stddev_pop(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), min(VALUE._col3), var_samp(VALUE._col4), var_pop(VALUE._col5), avg(VALUE._col6), var_samp(VALUE._col7), avg(VALUE._col8), min(VALUE._col9), var_pop(VALUE._col10), stddev_pop(VALUE._col11), sum(VALUE._col12)
-                keys: KEY._col0 (type: timestamp), KEY._col1 (type: string)
+                keys: KEY._col0 (type: string), KEY._col1 (type: timestamp)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14
                 Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: _col0 (type: timestamp), _col1 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
+                  expressions: _col1 (type: timestamp), _col0 (type: string), _col2 (type: double), (_col2 * 10.175) (type: double), (- _col2) (type: double), _col3 (type: double), (- _col2) (type: double), (-26.28 - _col2) (type: double), _col4 (type: bigint), (- _col4) (type: bigint), ((-26.28 - _col2) * (- _col2)) (type: double), _col5 (type: tinyint), (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4))) (type: double), (- (_col2 * 10.175)) (type: double), _col6 (type: double), (_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- _col2)) (type: double), (UDFToDouble((- _col4)) / _col2) (type: double), _col7 (type: double), (10.175 / _col3) (type: double), _col8 (type: double), _col9 (type: double), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) (type: double), (- (- (_col2 * 10.175))) (type: double), _col10 (type: double), (((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) - (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) * 10.175) (type: double), (10.175 % (10.175 / _col3)) (type: double), (- _col5) (type: tinyint), _col11 (type: double), _col12 (type: double), (- ((-26.28 - _col2) * (- _col2))) (type: double), ((- _col2) % _col10) (type: double), (-26.28 / UDFToDouble((- _col5))) (type: double), _col13 (type: double), _col14 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) / _col7) (type: double), (- (- _col4)) (type: bigint), _col4 (type: bigint), ((_col6 + (((-26.28 - _col2) * (- _col2)) * UDFToDouble((- _col4)))) % -26.28) (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38
                   Statistics: Num rows: 6144 Data size: 1320982 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
@@ -2676,11 +2676,11 @@ STAGE PLANS:
                     Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)
-                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                      outputColumnNames: cboolean1, cfloat, cbigint, cint, cdouble, ctinyint, csmallint
                       Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
-                        aggregations: max(_col1), sum(_col2), var_samp(_col3), avg(_col4), min(_col2), var_pop(_col2), sum(_col3), stddev_samp(_col5), stddev_pop(_col6), avg(_col3)
-                        keys: _col0 (type: boolean)
+                        aggregations: max(cfloat), sum(cbigint), var_samp(cint), avg(cdouble), min(cbigint), var_pop(cbigint), sum(cint), stddev_samp(ctinyint), stddev_pop(csmallint), avg(cint)
+                        keys: cboolean1 (type: boolean)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                         Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
@@ -2915,10 +2915,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                   Select Operator
                     expressions: i (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: i
                     Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(i)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3093,10 +3093,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ctinyint (type: tinyint)
-                    outputColumnNames: _col0
+                    outputColumnNames: ctinyint
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(ctinyint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3160,10 +3160,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: cint
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cint)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3227,10 +3227,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cfloat (type: float)
-                    outputColumnNames: _col0
+                    outputColumnNames: cfloat
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cfloat)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3294,10 +3294,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cstring1 (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: cstring1
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cstring1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -3361,10 +3361,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cboolean1 (type: boolean)
-                    outputColumnNames: _col0
+                    outputColumnNames: cboolean1
                     Statistics: Num rows: 12288 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: count(_col0)
+                      aggregations: count(cboolean1)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
index 23798e0..468802c 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_distinct_gby.q.out
@@ -38,10 +38,10 @@ STAGE PLANS:
                   Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: a
                     Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: a (type: int)
                       mode: final
                       outputColumnNames: _col0
                       Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
@@ -109,10 +109,10 @@ STAGE PLANS:
                   Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: cint (type: int)
-                    outputColumnNames: _col0
+                    outputColumnNames: cint
                     Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: int)
+                      keys: cint (type: int)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index b253508..f0ddc5b 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -59,10 +59,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -1793,7 +1793,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -1832,10 +1832,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -1897,7 +1897,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -2773,10 +2773,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2791,10 +2791,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2973,10 +2973,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -2991,10 +2991,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -3161,20 +3161,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: ds (type: string)
+                  Group By Operator
+                    keys: ds (type: string)
+                    mode: hash
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 10 
             Map Operator Tree:
                 TableScan
@@ -3182,10 +3178,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -3199,20 +3195,16 @@ STAGE PLANS:
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  Select Operator
-                    expressions: ds (type: string)
+                  Group By Operator
+                    keys: ds (type: string)
+                    mode: hash
                     outputColumnNames: _col0
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
+                    Reduce Output Operator
+                      key expressions: _col0 (type: string)
+                      sort order: +
+                      Map-reduce partition columns: _col0 (type: string)
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 7 
             Map Operator Tree:
                 TableScan
@@ -3220,10 +3212,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -4186,7 +4178,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 #### A masked pattern was here ####
 1000
-Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: -- parent is reduce tasks
 EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
@@ -4241,10 +4233,10 @@ STAGE PLANS:
                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: '2008-04-08' (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      keys: _col0 (type: string)
+                      keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
@@ -4288,7 +4280,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Map Join MAPJOIN[23][bigTable=?] in task 'Map 1' is a cross product
+Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -4889,10 +4881,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: max(_col0)
+                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -4907,10 +4899,10 @@ STAGE PLANS:
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
-                    outputColumnNames: _col0
+                    outputColumnNames: ds
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
-                      aggregations: min(_col0)
+                      aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
                       Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
index 01a36e5..74be17b 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out
@@ -75,19 +75,15 @@ STAGE PLANS:
                               1 Map 4
                             Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
                             HybridGraceHashJoin: true
-                            Select Operator
-                              expressions: _col1 (type: double)
+                            Group By Operator
+                              aggregations: sum(_col1)
+                              mode: hash
                               outputColumnNames: _col0
-                              Statistics: Num rows: 7433 Data size: 1598388 Basic stats: COMPLETE Column stats: NONE
-                              Group By Operator
-                                aggregations: sum(_col0)
-                                mode: hash
-                                outputColumnNames: _col0
+                              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                              Reduce Output Operator
+                                sort order: 
                                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                                Reduce Output Operator
-                                  sort order: 
-                                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
-                                  value expressions: _col0 (type: double)
+                                value expressions: _col0 (type: double)
             Execution mode: vectorized
         Map 4 
             Map Operator Tree:


[36/41] hive git commit: HIVE-11825 : get_json_object(col, '$.a') is null in where clause didn't work (Cazen Lee via Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11825 : get_json_object(col, '$.a') is null in where clause didn't work (Cazen Lee via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c7de9b9f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c7de9b9f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c7de9b9f

Branch: refs/heads/llap
Commit: c7de9b9f5f627c3f3eef25ca783e88ccd7fa3ff6
Parents: 2278548
Author: Cazen Lee <Ca...@samsung.com>
Authored: Wed Sep 16 01:04:00 2015 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Sep 18 09:59:31 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c7de9b9f/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java
index e69ad68..2c42fae 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java
@@ -66,6 +66,8 @@ public class UDFJson extends UDF {
   static {
     // Allows for unescaped ASCII control characters in JSON values
     JSON_FACTORY.enable(Feature.ALLOW_UNQUOTED_CONTROL_CHARS);
+    // Enabled to accept any character escaped by the backslash quoting mechanism
+    JSON_FACTORY.enable(Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER);
   }
   private static final ObjectMapper MAPPER = new ObjectMapper(JSON_FACTORY);
   private static final JavaType MAP_TYPE = TypeFactory.fromClass(Map.class);
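
For reference, a minimal standalone sketch of what the newly enabled feature changes. It uses the Jackson 2.x (com.fasterxml) API and a made-up JSON snippet purely for illustration; Hive's UDFJson may pull the parser from a different package, and this is not part of the patch above. A value containing a non-standard escape such as \' fails strict parsing but is accepted once ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER is enabled.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;

public class BackslashEscapeSketch {
  public static void main(String[] args) throws Exception {
    // JSON with a non-standard escape sequence (\') that strict parsers reject.
    String json = "{\"a\":\"b\\'c\"}";

    // Default factory: the unrecognized escape raises a parse exception.
    try {
      new ObjectMapper(new JsonFactory()).readTree(json);
    } catch (Exception e) {
      System.out.println("strict parse failed: " + e.getMessage());
    }

    // Factory with the feature enabled, mirroring the UDFJson change above.
    JsonFactory lenient = new JsonFactory();
    lenient.enable(JsonParser.Feature.ALLOW_BACKSLASH_ESCAPING_ANY_CHARACTER);
    System.out.println(new ObjectMapper(lenient).readTree(json)); // {"a":"b'c"}
  }
}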


[10/41] hive git commit: HIVE-11813: Avoid expensive AST tree conversion to String for expressions in WHERE clause in CBO (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)

Posted by se...@apache.org.
HIVE-11813: Avoid expensive AST tree conversion to String for expressions in WHERE clause in CBO (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5a550cb4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5a550cb4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5a550cb4

Branch: refs/heads/llap
Commit: 5a550cb466664daaa2322aeaf5abf19f2b9d51c0
Parents: 03f46b2
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Sep 16 15:48:59 2015 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Sep 16 15:48:59 2015 +0100

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/parse/CalcitePlanner.java     | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5a550cb4/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 8e992da..d5c747f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1482,8 +1482,9 @@ public class CalcitePlanner extends SemanticAnalyzer {
       return tableRel;
     }
 
-    private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel) throws SemanticException {
-      ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, relToHiveRR.get(srcRel));
+    private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel,
+            boolean useCaching) throws SemanticException {
+      ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, relToHiveRR.get(srcRel), useCaching);
       if (filterCondn instanceof ExprNodeConstantDesc
           && !filterCondn.getTypeString().equals(serdeConstants.BOOLEAN_TYPE_NAME)) {
         // queries like select * from t1 where 'foo';
@@ -1634,7 +1635,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
               subQuery.getJoinConditionAST());
           searchCond = subQuery.updateOuterQueryFilter(clonedSearchCond);
 
-          srcRel = genFilterRelNode(searchCond, srcRel);
+          srcRel = genFilterRelNode(searchCond, srcRel, forHavingClause);
 
           /*
            * For Not Exists and Not In, add a projection on top of the Left
@@ -1650,7 +1651,7 @@ public class CalcitePlanner extends SemanticAnalyzer {
         return srcRel;
       }
 
-      return genFilterRelNode(searchCond, srcRel);
+      return genFilterRelNode(searchCond, srcRel, forHavingClause);
     }
 
     private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
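
For context, a minimal hypothetical sketch of the pattern this change threads through genFilterRelNode: a translation cache that is consulted only when the caller passes useCaching=true, so the expensive key derivation (standing in for the AST-to-String conversion the commit avoids) is skipped otherwise. The names AstNode, ExprDesc, ExprTranslator and toCacheKey below are invented for illustration and are not Hive's API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical stand-ins for an AST node and its translated descriptor.
class AstNode { final String text; AstNode(String t) { text = t; } }
class ExprDesc { final String desc; ExprDesc(String d) { desc = d; } }

public class ExprTranslator {
  private final Map<String, ExprDesc> cache = new HashMap<>();

  // Expensive key derivation; imagine a full tree walk producing a string.
  private String toCacheKey(AstNode node) {
    return node.text;
  }

  ExprDesc translate(AstNode node, boolean useCaching) {
    if (!useCaching) {
      return doTranslate(node); // skip the key computation entirely
    }
    return cache.computeIfAbsent(toCacheKey(node), k -> doTranslate(node));
  }

  private ExprDesc doTranslate(AstNode node) {
    return new ExprDesc("translated:" + node.text);
  }

  public static void main(String[] args) {
    ExprTranslator t = new ExprTranslator();
    System.out.println(t.translate(new AstNode("a = 1"), true).desc);   // cached
    System.out.println(t.translate(new AstNode("a = 1"), false).desc);  // uncached
  }
}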


[23/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_resolution.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_resolution.q.out b/ql/src/test/results/clientpositive/groupby_resolution.q.out
index 7df53f2..ea40014 100644
--- a/ql/src/test/results/clientpositive/groupby_resolution.q.out
+++ b/ql/src/test/results/clientpositive/groupby_resolution.q.out
@@ -15,12 +15,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -60,12 +60,12 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: key (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
@@ -106,10 +106,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
                 Map-reduce partition columns: rand() (type: double)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -176,10 +176,10 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
                 Map-reduce partition columns: rand() (type: double)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -245,11 +245,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -297,11 +297,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -350,11 +350,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -427,11 +427,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -607,22 +607,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (key < '12') (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: key (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: rand() (type: double)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: rand() (type: double)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_rollup1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
index 8e04d86..292e14d 100644
--- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
@@ -116,11 +116,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, val
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT val)
+                keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE
@@ -293,11 +293,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), val (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, val
               Statistics: Num rows: 1 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), '0' (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT val)
+                keys: key (type: string), '0' (type: string), val (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 2 Data size: 60 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_sort_10.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_10.q.out b/ql/src/test/results/clientpositive/groupby_sort_10.q.out
index 2fead26..db8899f 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_10.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_10.q.out
@@ -41,10 +41,10 @@ STAGE PLANS:
             Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: final
                 outputColumnNames: _col0
                 Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE
@@ -109,11 +109,11 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 60 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 8 Data size: 60 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 bucketGroup: true
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 8 Data size: 60 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_sort_11.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_11.q.out b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
index adf585d..2d4a44e 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_11.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
@@ -41,12 +41,12 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
+                aggregations: count(DISTINCT key)
                 bucketGroup: true
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
@@ -435,10 +435,10 @@ STAGE PLANS:
             Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: key (type: string)
                 sort order: +
                 Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/groupby_sort_8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_sort_8.q.out b/ql/src/test/results/clientpositive/groupby_sort_8.q.out
index ec16eb0..5152385 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_8.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_8.q.out
@@ -56,12 +56,12 @@ STAGE PLANS:
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
+                aggregations: count(DISTINCT key)
                 bucketGroup: true
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
@@ -122,11 +122,11 @@ STAGE PLANS:
             Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
-                keys: _col0 (type: string)
+                aggregations: count(DISTINCT key)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/having.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/having.q.out b/ql/src/test/results/clientpositive/having.q.out
index 8682a46..35b2901 100644
--- a/ql/src/test/results/clientpositive/having.q.out
+++ b/ql/src/test/results/clientpositive/having.q.out
@@ -99,22 +99,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) <> 302.0) (type: boolean)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: max(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: string)
+                  value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0)
@@ -469,11 +465,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1)
-                keys: _col0 (type: string)
+                aggregations: max(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -736,22 +732,18 @@ STAGE PLANS:
             Filter Operator
               predicate: (UDFToDouble(key) > 300.0) (type: boolean)
               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string), value (type: string)
+              Group By Operator
+                aggregations: max(value)
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: max(_col1)
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: string)
+                  value expressions: _col1 (type: string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: max(VALUE._col0)
@@ -930,11 +922,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: max(_col1)
-                keys: _col0 (type: string)
+                aggregations: max(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1192,11 +1184,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/having2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/having2.q.out b/ql/src/test/results/clientpositive/having2.q.out
index ba601f9..699d8ee 100644
--- a/ql/src/test/results/clientpositive/having2.q.out
+++ b/ql/src/test/results/clientpositive/having2.q.out
@@ -133,11 +133,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: customer_name (type: string), customer_balance (type: double), order_quantity (type: double), discount (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: customer_name, customer_balance, order_quantity, discount
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: sum(_col1), sum(_col2), count(_col3)
-                keys: _col0 (type: string)
+                aggregations: sum(customer_balance), sum(order_quantity), count(discount)
+                keys: customer_name (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -200,11 +200,11 @@ STAGE PLANS:
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: customer_name (type: string), customer_balance (type: double), order_quantity (type: double), discount (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3
+              outputColumnNames: customer_name, customer_balance, order_quantity, discount
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: sum(_col1), sum(_col2), count(_col3)
-                keys: _col0 (type: string)
+                aggregations: sum(customer_balance), sum(order_quantity), count(discount)
+                keys: customer_name (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
index 7755ed5..2639edf 100644
--- a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out
@@ -229,10 +229,10 @@ STAGE PLANS:
               predicate: (((((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (UDFToDouble(key) > 80.0)) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator
@@ -326,10 +326,10 @@ STAGE PLANS:
               predicate: (((((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (UDFToDouble(key) > 70.0)) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/index_auto_self_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/index_auto_self_join.q.out
index a214845..e8c23dc 100644
--- a/ql/src/test/results/clientpositive/index_auto_self_join.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_self_join.q.out
@@ -137,10 +137,10 @@ STAGE PLANS:
               predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator
@@ -235,10 +235,10 @@ STAGE PLANS:
               predicate: (((UDFToDouble(key) > 70.0) and (UDFToDouble(key) < 90.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/index_auto_update.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out
index 70d0286..11af3f5 100644
--- a/ql/src/test/results/clientpositive/index_auto_update.q.out
+++ b/ql/src/test/results/clientpositive/index_auto_update.q.out
@@ -109,10 +109,10 @@ STAGE PLANS:
             alias: temp
             Select Operator
               expressions: key (type: string), INPUT__FILE__NAME (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: key, INPUT__FILE__NAME, BLOCK__OFFSET__INSIDE__FILE
               Group By Operator
-                aggregations: collect_set(_col2)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: collect_set(BLOCK__OFFSET__INSIDE__FILE)
+                keys: key (type: string), INPUT__FILE__NAME (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out b/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
index 65b6721..3df629e 100644
--- a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out
@@ -70,10 +70,10 @@ STAGE PLANS:
               predicate: ((UDFToDouble(key) = 86.0) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
index 9673f08..37a2944 100644
--- a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
+++ b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out
@@ -44,10 +44,10 @@ STAGE PLANS:
               predicate: (((UDFToDouble(key) > 80.0) and (UDFToDouble(key) < 100.0)) and (not EWAH_BITMAP_EMPTY(_bitmaps))) (type: boolean)
               Select Operator
                 expressions: _bucketname (type: string), _offset (type: bigint)
-                outputColumnNames: _col0, _col1
+                outputColumnNames: _col1, _col2
                 Group By Operator
-                  aggregations: collect_set(_col1)
-                  keys: _col0 (type: string)
+                  aggregations: collect_set(_col2)
+                  keys: _col1 (type: string)
                   mode: hash
                   outputColumnNames: _col0, _col1
                   Reduce Output Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index c2f0810..a12dcd5 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -473,11 +473,11 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
index 6aef463..1294766 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out
@@ -81,11 +81,11 @@ STAGE PLANS:
             Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: key
               Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count()
-                keys: _col0 (type: string)
+                keys: key (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/join18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join18.q.out b/ql/src/test/results/clientpositive/join18.q.out
index c3ab306..6a50604 100644
--- a/ql/src/test/results/clientpositive/join18.q.out
+++ b/ql/src/test/results/clientpositive/join18.q.out
@@ -43,11 +43,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -113,11 +113,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT value)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/join18_multi_distinct.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/join18_multi_distinct.q.out
index a83102c..dfb0f68 100644
--- a/ql/src/test/results/clientpositive/join18_multi_distinct.q.out
+++ b/ql/src/test/results/clientpositive/join18_multi_distinct.q.out
@@ -45,11 +45,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(value)
+                keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -115,11 +115,11 @@ STAGE PLANS:
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: key, value
               Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1), count(DISTINCT _col0)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT value), count(DISTINCT key)
+                keys: key (type: string), value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3
                 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/join31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join31.q.out b/ql/src/test/results/clientpositive/join31.q.out
index 21aa1de..70ca814 100644
--- a/ql/src/test/results/clientpositive/join31.q.out
+++ b/ql/src/test/results/clientpositive/join31.q.out
@@ -51,20 +51,16 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string)
@@ -264,20 +260,16 @@ STAGE PLANS:
             Filter Operator
               predicate: key is not null (type: boolean)
               Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: key (type: string)
+              Group By Operator
+                keys: key (type: string)
+                mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
index 904acfc..dbc5eac 100644
--- a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
+++ b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
@@ -550,10 +550,10 @@ STAGE PLANS:
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: hr (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: hr
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: string)
+                keys: hr (type: string)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/limit_pushdown.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/limit_pushdown.q.out b/ql/src/test/results/clientpositive/limit_pushdown.q.out
index d77bc60..40f6a48 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown.q.out
@@ -355,10 +355,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: cdouble (type: double)
-              outputColumnNames: _col0
+              outputColumnNames: cdouble
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: double)
+                keys: cdouble (type: double)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -438,11 +438,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cdouble (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ctinyint, cdouble
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: tinyint), _col1 (type: double)
+                aggregations: count(DISTINCT cdouble)
+                keys: ctinyint (type: tinyint), cdouble (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -523,10 +523,10 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cdouble (type: double)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ctinyint, cdouble
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                keys: _col0 (type: tinyint), _col1 (type: double)
+                keys: ctinyint (type: tinyint), cdouble (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -613,11 +613,11 @@ STAGE PLANS:
             Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
-              outputColumnNames: _col0, _col1, _col2
+              outputColumnNames: ctinyint, cstring1, cstring2
               Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1), count(DISTINCT _col2)
-                keys: _col0 (type: tinyint), _col1 (type: string), _col2 (type: string)
+                aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2)
+                keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
                 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
@@ -727,11 +727,11 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: value (type: string), key (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: value, key
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: sum(_col1)
-                keys: _col0 (type: string)
+                aggregations: sum(key)
+                keys: value (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1012,16 +1012,16 @@ STAGE PLANS:
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: value (type: string), key (type: string)
-              outputColumnNames: _col0, _col1
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: key, value
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
-                key expressions: _col0 (type: string)
+                key expressions: value (type: string)
                 sort order: +
-                Map-reduce partition columns: _col0 (type: string)
+                Map-reduce partition columns: value (type: string)
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 TopN Hash Memory Usage: 0.3
-                value expressions: _col1 (type: string)
+                value expressions: key (type: string)
       Reduce Operator Tree:
         Group By Operator
           aggregations: sum(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/lineage2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
index 549b5f6..4184a83 100644
--- a/ql/src/test/results/clientpositive/lineage2.q.out
+++ b/ql/src/test/results/clientpositive/lineage2.q.out
@@ -634,7 +634,7 @@ having count(a.c2) > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dest_l2
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) $f0) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) $f0)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) $f2)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default
 .dest_l2.id"}]}
+{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) c1) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) c1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) id)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.de
 st_l2.id"}]}
 1
 PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
 from dest_l2 a join dest_l3 b on (a.id = b.id)

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index 6fd2aa4..708abee 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -180,7 +180,7 @@ PREHOOK: Input: default@src1
 #### A masked pattern was here ####
 {"version":"1.0","engine":"mr","hash":"8bf193b0658183be94e2428a79d91d10","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > UDFToDouble(300))","edgeType":"PREDICATE"},{"sources":[2,4],"targets":[0,1],"expression":"(UDFToDouble(a.key) = UDFToDouble((UDFToInteger(b.ctinyint) + 300)))","edgeType":"PREDICATE"},{"sources":[],"targets":[0,1],"expression":"(1 = 1)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 311	val_311
-Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: select key, value from src1
 where key not in (select key+18 from src1) order by key
 PREHOOK: type: QUERY
@@ -317,6 +317,6 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Input: default@dest_v3
 #### A masked pattern was here ####
-{"version":"1.0","engine":"mr","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) $f0) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) $f1)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) $f1)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.cint) > 10)","edge
 Type":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
+{"version":"1.0","engine":"mr","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.c
 int) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
 38	216	false
 38	229	true

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
index 753729f..8f1d94e 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
@@ -451,7 +451,7 @@ STAGE PLANS:
               name: default.fact_daily
             name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=1 [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=1 [fact_daily]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
index 9775f30..e92f307 100644
--- a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
@@ -566,11 +566,11 @@ STAGE PLANS:
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:src 
+        $hdt$_1:src 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:src 
+        $hdt$_1:src 
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -619,11 +619,11 @@ STAGE PLANS:
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_2:src 
+        $hdt$_2:src 
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_2:src 
+        $hdt$_2:src 
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -651,22 +651,18 @@ STAGE PLANS:
                 1 _col0 (type: string)
               outputColumnNames: _col2
               Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col2 (type: string)
-                outputColumnNames: _col0
+              Group By Operator
+                aggregations: count()
+                keys: _col2 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                Group By Operator
-                  aggregations: count()
-                  keys: _col0 (type: string)
-                  mode: hash
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: _col0 (type: string)
-                    sort order: +
-                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col1 (type: bigint)
+                  value expressions: _col1 (type: bigint)
       Local Work:
         Map Reduce Local Work
       Reduce Operator Tree:

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/metadata_only_queries.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/metadata_only_queries.q.out
index 5907f4a..2dcd437 100644
--- a/ql/src/test/results/clientpositive/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/metadata_only_queries.q.out
@@ -431,10 +431,10 @@ STAGE PLANS:
             Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: ts (type: timestamp)
-              outputColumnNames: _col0
+              outputColumnNames: ts
               Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(ts)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE

http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/metadataonly1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/metadataonly1.q.out b/ql/src/test/results/clientpositive/metadataonly1.q.out
index 0d758a5..727b2e7 100644
--- a/ql/src/test/results/clientpositive/metadataonly1.q.out
+++ b/ql/src/test/results/clientpositive/metadataonly1.q.out
@@ -43,10 +43,10 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(ds)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -144,10 +144,10 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(ds)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -158,7 +158,7 @@ STAGE PLANS:
                   value expressions: _col0 (type: string)
                   auto parallelism: false
       Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
       Path -> Partition:
         -mr-10003default.test1{ds=1} 
           Partition
@@ -200,7 +200,7 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -284,11 +284,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col0)
-                keys: _col0 (type: string)
+                aggregations: count(DISTINCT ds)
+                keys: ds (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -299,7 +299,7 @@ STAGE PLANS:
                   tag: -1
                   auto parallelism: false
       Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
       Path -> Partition:
         -mr-10003default.test1{ds=1} 
           Partition
@@ -341,7 +341,7 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -425,10 +425,10 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(_col0)
+                aggregations: count(ds)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
@@ -482,7 +482,7 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        /test1/ds=1 [$hdt$_0:test1]
+        /test1/ds=1 [test1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -604,10 +604,10 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(ds)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -618,8 +618,8 @@ STAGE PLANS:
                   value expressions: _col0 (type: string)
                   auto parallelism: false
       Path -> Alias:
-        -mr-10005default.test1{ds=1} [$hdt$_0:$hdt$_1:$hdt$_1:a2]
-        -mr-10006default.test1{ds=2} [$hdt$_0:$hdt$_1:$hdt$_1:a2]
+        -mr-10005default.test1{ds=1} [$hdt$_1:a2]
+        -mr-10006default.test1{ds=2} [$hdt$_1:a2]
       Path -> Partition:
         -mr-10005default.test1{ds=1} 
           Partition
@@ -700,8 +700,8 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        -mr-10005default.test1{ds=1} [$hdt$_0:$hdt$_1:$hdt$_1:a2]
-        -mr-10006default.test1{ds=2} [$hdt$_0:$hdt$_1:$hdt$_1:a2]
+        -mr-10005default.test1{ds=1} [$hdt$_1:a2]
+        -mr-10006default.test1{ds=2} [$hdt$_1:a2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -862,8 +862,8 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        /test1/ds=1 [$hdt$_0:$hdt$_0:a2]
-        /test1/ds=2 [$hdt$_0:$hdt$_0:a2]
+        /test1/ds=1 [$hdt$_0:a2]
+        /test1/ds=2 [$hdt$_0:a2]
 #### A masked pattern was here ####
       Needs Tagging: true
       Reduce Operator Tree:
@@ -1053,11 +1053,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ds, hr
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT hr)
+                keys: ds (type: string), hr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1069,9 +1069,9 @@ STAGE PLANS:
                   tag: -1
                   auto parallelism: false
       Path -> Alias:
-        -mr-10003default.test2{ds=1, hr=1} [$hdt$_0:test2]
-        -mr-10004default.test2{ds=1, hr=2} [$hdt$_0:test2]
-        -mr-10005default.test2{ds=1, hr=3} [$hdt$_0:test2]
+        -mr-10003default.test2{ds=1, hr=1} [test2]
+        -mr-10004default.test2{ds=1, hr=2} [test2]
+        -mr-10005default.test2{ds=1, hr=3} [test2]
       Path -> Partition:
         -mr-10003default.test2{ds=1, hr=1} 
           Partition
@@ -1194,9 +1194,9 @@ STAGE PLANS:
               name: default.test2
             name: default.test2
       Truncated Path -> Alias:
-        -mr-10003default.test2{ds=1, hr=1} [$hdt$_0:test2]
-        -mr-10004default.test2{ds=1, hr=2} [$hdt$_0:test2]
-        -mr-10005default.test2{ds=1, hr=3} [$hdt$_0:test2]
+        -mr-10003default.test2{ds=1, hr=1} [test2]
+        -mr-10004default.test2{ds=1, hr=2} [test2]
+        -mr-10005default.test2{ds=1, hr=3} [test2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -1291,11 +1291,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ds, hr
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(_col1)
-                keys: _col0 (type: string)
+                aggregations: count(hr)
+                keys: ds (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1434,9 +1434,9 @@ STAGE PLANS:
               name: default.test2
             name: default.test2
       Truncated Path -> Alias:
-        /test2/ds=1/hr=1 [$hdt$_0:test2]
-        /test2/ds=1/hr=2 [$hdt$_0:test2]
-        /test2/ds=1/hr=3 [$hdt$_0:test2]
+        /test2/ds=1/hr=1 [test2]
+        /test2/ds=1/hr=2 [test2]
+        /test2/ds=1/hr=3 [test2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -1524,10 +1524,10 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string)
-              outputColumnNames: _col0
+              outputColumnNames: ds
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: max(_col0)
+                aggregations: max(ds)
                 mode: hash
                 outputColumnNames: _col0
                 Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
@@ -1538,8 +1538,8 @@ STAGE PLANS:
                   value expressions: _col0 (type: string)
                   auto parallelism: false
       Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
-        -mr-10004default.test1{ds=2} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
+        -mr-10004default.test1{ds=2} [test1]
       Path -> Partition:
         -mr-10003default.test1{ds=1} 
           Partition
@@ -1620,8 +1620,8 @@ STAGE PLANS:
               name: default.test1
             name: default.test1
       Truncated Path -> Alias:
-        -mr-10003default.test1{ds=1} [$hdt$_0:test1]
-        -mr-10004default.test1{ds=2} [$hdt$_0:test1]
+        -mr-10003default.test1{ds=1} [test1]
+        -mr-10004default.test1{ds=2} [test1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -1764,11 +1764,11 @@ STAGE PLANS:
             GatherStats: false
             Select Operator
               expressions: ds (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1
+              outputColumnNames: ds, hr
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
-                aggregations: count(DISTINCT _col1)
-                keys: _col0 (type: string), _col1 (type: string)
+                aggregations: count(DISTINCT hr)
+                keys: ds (type: string), hr (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
@@ -1780,11 +1780,11 @@ STAGE PLANS:
                   tag: -1
                   auto parallelism: false
       Path -> Alias:
-        -mr-10003default.test2{ds=01_10_10, hr=01} [$hdt$_0:test2]
-        -mr-10004default.test2{ds=01_10_20, hr=02} [$hdt$_0:test2]
-        -mr-10005default.test2{ds=1, hr=1} [$hdt$_0:test2]
-        -mr-10006default.test2{ds=1, hr=2} [$hdt$_0:test2]
-        -mr-10007default.test2{ds=1, hr=3} [$hdt$_0:test2]
+        -mr-10003default.test2{ds=01_10_10, hr=01} [test2]
+        -mr-10004default.test2{ds=01_10_20, hr=02} [test2]
+        -mr-10005default.test2{ds=1, hr=1} [test2]
+        -mr-10006default.test2{ds=1, hr=2} [test2]
+        -mr-10007default.test2{ds=1, hr=3} [test2]
       Path -> Partition:
         -mr-10003default.test2{ds=01_10_10, hr=01} 
           Partition
@@ -1987,11 +1987,11 @@ STAGE PLANS:
               name: default.test2
             name: default.test2
       Truncated Path -> Alias:
-        -mr-10003default.test2{ds=01_10_10, hr=01} [$hdt$_0:test2]
-        -mr-10004default.test2{ds=01_10_20, hr=02} [$hdt$_0:test2]
-        -mr-10005default.test2{ds=1, hr=1} [$hdt$_0:test2]
-        -mr-10006default.test2{ds=1, hr=2} [$hdt$_0:test2]
-        -mr-10007default.test2{ds=1, hr=3} [$hdt$_0:test2]
+        -mr-10003default.test2{ds=01_10_10, hr=01} [test2]
+        -mr-10004default.test2{ds=01_10_20, hr=02} [test2]
+        -mr-10005default.test2{ds=1, hr=1} [test2]
+        -mr-10006default.test2{ds=1, hr=2} [test2]
+        -mr-10007default.test2{ds=1, hr=3} [test2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator


[16/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
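The golden-file diffs in this patch follow one pattern: once AggregateProjectMergeRule fires, the map-side Group By Operator consumes the underlying table columns (key, value, ds, hr, cdouble, ...) directly instead of the _colN aliases produced by the Select Operator beneath it, and where that projection becomes fully redundant it is dropped from the plan. As an illustrative sketch only (the real queries live in the corresponding .q files, and the table and column names here are assumptions), a query of roughly this shape reproduces the change recorded in limit_pushdown.q.out earlier in this message:

    -- hypothetical example: aggregate over a simple projection of src
    EXPLAIN
    SELECT value, sum(key) AS total
    FROM src
    GROUP BY value
    LIMIT 20;

Before the rule, the plan shows a Select Operator (key, value -> _col0, _col1) followed by a Group By Operator with keys: _col0 and aggregations: sum(_col1); after the rule, the Group By keys on value and aggregates key directly, which is the +/- pair visible in the diffs above.
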
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
index 8156789..57fcc3c 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_2.q.out
@@ -335,245 +335,248 @@ Stage-0
       limit:100
       Stage-1
          Reducer 5
-         File Output Operator [FS_69]
+         File Output Operator [FS_68]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Limit [LIM_68]
+            Limit [LIM_67]
                Number of rows:100
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-               Select Operator [SEL_67]
+               Select Operator [SEL_66]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                |<-Reducer 4 [SIMPLE_EDGE]
-                  Reduce Output Operator [RS_66]
+                  Reduce Output Operator [RS_65]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
                      Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
-                     Group By Operator [GBY_64]
-                     |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
-                     |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
-                     |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
-                     |<-Reducer 3 [SIMPLE_EDGE]
-                        Reduce Output Operator [RS_63]
-                           key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                           Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                           sort order:+++
-                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                           value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
-                           Group By Operator [GBY_62]
-                              aggregations:["count(_col3)","count(_col4)","count(_col5)"]
-                              keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                              outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                     Select Operator [SEL_64]
+                        outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                        Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator [GBY_63]
+                        |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
+                        |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+                        |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                        |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                        |<-Reducer 3 [SIMPLE_EDGE]
+                           Reduce Output Operator [RS_62]
+                              key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+                              Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+                              sort order:+++
                               Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                              Select Operator [SEL_60]
+                              value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
+                              Group By Operator [GBY_61]
+                                 aggregations:["count(_col13)","count(_col21)","count(_col3)"]
+                                 keys:_col2 (type: string), _col12 (type: string), _col20 (type: string)
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                                 Merge Join Operator [MERGEJOIN_111]
-                                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
-                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 11 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_58]
-                                 |     key expressions:_col15 (type: string), _col17 (type: string)
-                                 |     Map-reduce partition columns:_col15 (type: string), _col17 (type: string)
-                                 |     sort order:++
-                                 |     Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string)
-                                 |     Select Operator [SEL_49]
-                                 |        outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
-                                 |        Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 |        Merge Join Operator [MERGEJOIN_110]
-                                 |        |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |  keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"}
-                                 |        |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
-                                 |        |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Reducer 10 [SIMPLE_EDGE]
-                                 |        |  Reduce Output Operator [RS_45]
-                                 |        |     key expressions:_col4 (type: string), _col6 (type: string)
-                                 |        |     Map-reduce partition columns:_col4 (type: string), _col6 (type: string)
-                                 |        |     sort order:++
-                                 |        |     Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     value expressions:_col2 (type: string), _col3 (type: string)
-                                 |        |     Merge Join Operator [MERGEJOIN_108]
-                                 |        |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |     |  keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"}
-                                 |        |     |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                 |        |     |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     |<-Map 14 [SIMPLE_EDGE]
-                                 |        |     |  Reduce Output Operator [RS_42]
-                                 |        |     |     key expressions:_col1 (type: string)
-                                 |        |     |     Map-reduce partition columns:_col1 (type: string)
-                                 |        |     |     sort order:+
-                                 |        |     |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     |     Select Operator [SEL_16]
-                                 |        |     |        outputColumnNames:["_col1"]
-                                 |        |     |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     |        Filter Operator [FIL_102]
-                                 |        |     |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                 |        |     |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     |           TableScan [TS_14]
-                                 |        |     |              alias:src1
-                                 |        |     |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-                                 |        |     |<-Reducer 9 [SIMPLE_EDGE]
-                                 |        |        Reduce Output Operator [RS_40]
-                                 |        |           key expressions:_col3 (type: string)
-                                 |        |           Map-reduce partition columns:_col3 (type: string)
-                                 |        |           sort order:+
-                                 |        |           Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string)
-                                 |        |           Merge Join Operator [MERGEJOIN_107]
-                                 |        |           |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
-                                 |        |           |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                 |        |           |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           |<-Map 13 [SIMPLE_EDGE]
-                                 |        |           |  Reduce Output Operator [RS_37]
-                                 |        |           |     key expressions:_col0 (type: string)
-                                 |        |           |     Map-reduce partition columns:_col0 (type: string)
-                                 |        |           |     sort order:+
-                                 |        |           |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           |     Select Operator [SEL_13]
-                                 |        |           |        outputColumnNames:["_col0"]
-                                 |        |           |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           |        Filter Operator [FIL_101]
-                                 |        |           |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                 |        |           |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           |           TableScan [TS_11]
-                                 |        |           |              alias:d1
-                                 |        |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |        |           |<-Reducer 8 [SIMPLE_EDGE]
-                                 |        |              Reduce Output Operator [RS_35]
-                                 |        |                 key expressions:_col2 (type: string)
-                                 |        |                 Map-reduce partition columns:_col2 (type: string)
-                                 |        |                 sort order:+
-                                 |        |                 Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
-                                 |        |                 Merge Join Operator [MERGEJOIN_106]
-                                 |        |                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |        |                 |  keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"}
-                                 |        |                 |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                 |        |                 |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 |<-Map 12 [SIMPLE_EDGE]
-                                 |        |                 |  Reduce Output Operator [RS_32]
-                                 |        |                 |     key expressions:_col3 (type: string)
-                                 |        |                 |     Map-reduce partition columns:_col3 (type: string)
-                                 |        |                 |     sort order:+
-                                 |        |                 |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                 |        |                 |     Select Operator [SEL_10]
-                                 |        |                 |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                 |        |                 |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 |        Filter Operator [FIL_100]
-                                 |        |                 |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                 |        |                 |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 |           TableScan [TS_8]
-                                 |        |                 |              alias:ss
-                                 |        |                 |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                 |<-Map 7 [SIMPLE_EDGE]
-                                 |        |                    Reduce Output Operator [RS_30]
-                                 |        |                       key expressions:_col1 (type: string)
-                                 |        |                       Map-reduce partition columns:_col1 (type: string)
-                                 |        |                       sort order:+
-                                 |        |                       Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                       Select Operator [SEL_7]
-                                 |        |                          outputColumnNames:["_col1"]
-                                 |        |                          Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                          Filter Operator [FIL_99]
-                                 |        |                             predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
-                                 |        |                             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |        |                             TableScan [TS_5]
-                                 |        |                                alias:srcpart
-                                 |        |                                Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                                 |        |<-Reducer 16 [SIMPLE_EDGE]
-                                 |           Reduce Output Operator [RS_47]
-                                 |              key expressions:_col2 (type: string), _col4 (type: string)
-                                 |              Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
-                                 |              sort order:++
-                                 |              Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |              value expressions:_col3 (type: string), _col5 (type: string)
-                                 |              Merge Join Operator [MERGEJOIN_109]
-                                 |              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |              |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                 |              |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                 |              |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Map 15 [SIMPLE_EDGE]
-                                 |              |  Reduce Output Operator [RS_24]
-                                 |              |     key expressions:_col0 (type: string)
-                                 |              |     Map-reduce partition columns:_col0 (type: string)
-                                 |              |     sort order:+
-                                 |              |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |              |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                 |              |     Select Operator [SEL_19]
-                                 |              |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                 |              |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |              |        Filter Operator [FIL_103]
-                                 |              |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
-                                 |              |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                 |              |           TableScan [TS_17]
-                                 |              |              alias:sr
-                                 |              |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                 |              |<-Map 17 [SIMPLE_EDGE]
-                                 |                 Reduce Output Operator [RS_26]
-                                 |                    key expressions:_col0 (type: string)
-                                 |                    Map-reduce partition columns:_col0 (type: string)
-                                 |                    sort order:+
-                                 |                    Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |                    Select Operator [SEL_22]
-                                 |                       outputColumnNames:["_col0"]
-                                 |                       Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |                       Filter Operator [FIL_104]
-                                 |                          predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |                          Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |                          TableScan [TS_20]
-                                 |                             alias:d1
-                                 |                             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Reducer 2 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_56]
-                                       key expressions:_col1 (type: string), _col3 (type: string)
-                                       Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
-                                       sort order:++
-                                       Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       value expressions:_col2 (type: string)
-                                       Merge Join Operator [MERGEJOIN_105]
-                                       |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                       |  outputColumnNames:["_col1","_col2","_col3"]
-                                       |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 1 [SIMPLE_EDGE]
-                                       |  Reduce Output Operator [RS_51]
-                                       |     key expressions:_col0 (type: string)
-                                       |     Map-reduce partition columns:_col0 (type: string)
-                                       |     sort order:+
-                                       |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                       |     Select Operator [SEL_1]
-                                       |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                       |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |        Filter Operator [FIL_97]
-                                       |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                       |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                       |           TableScan [TS_0]
-                                       |              alias:cs
-                                       |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 6 [SIMPLE_EDGE]
-                                          Reduce Output Operator [RS_53]
-                                             key expressions:_col0 (type: string)
-                                             Map-reduce partition columns:_col0 (type: string)
-                                             sort order:+
-                                             Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             Select Operator [SEL_4]
-                                                outputColumnNames:["_col0"]
+                                 Select Operator [SEL_60]
+                                    outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"]
+                                    Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                    Merge Join Operator [MERGEJOIN_110]
+                                    |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |  keys:{"0":"_col1 (type: string), _col3 (type: string)","1":"_col15 (type: string), _col17 (type: string)"}
+                                    |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                    |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                    |<-Reducer 11 [SIMPLE_EDGE]
+                                    |  Reduce Output Operator [RS_58]
+                                    |     key expressions:_col15 (type: string), _col17 (type: string)
+                                    |     Map-reduce partition columns:_col15 (type: string), _col17 (type: string)
+                                    |     sort order:++
+                                    |     Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                    |     value expressions:_col6 (type: string), _col7 (type: string), _col14 (type: string)
+                                    |     Select Operator [SEL_49]
+                                    |        outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                    |        Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                    |        Merge Join Operator [MERGEJOIN_109]
+                                    |        |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |        |  keys:{"0":"_col4 (type: string), _col6 (type: string)","1":"_col2 (type: string), _col4 (type: string)"}
+                                    |        |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
+                                    |        |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                    |        |<-Reducer 10 [SIMPLE_EDGE]
+                                    |        |  Reduce Output Operator [RS_45]
+                                    |        |     key expressions:_col4 (type: string), _col6 (type: string)
+                                    |        |     Map-reduce partition columns:_col4 (type: string), _col6 (type: string)
+                                    |        |     sort order:++
+                                    |        |     Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     value expressions:_col2 (type: string), _col3 (type: string)
+                                    |        |     Merge Join Operator [MERGEJOIN_107]
+                                    |        |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |        |     |  keys:{"0":"_col3 (type: string)","1":"_col1 (type: string)"}
+                                    |        |     |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                    |        |     |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     |<-Map 14 [SIMPLE_EDGE]
+                                    |        |     |  Reduce Output Operator [RS_42]
+                                    |        |     |     key expressions:_col1 (type: string)
+                                    |        |     |     Map-reduce partition columns:_col1 (type: string)
+                                    |        |     |     sort order:+
+                                    |        |     |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     |     Select Operator [SEL_16]
+                                    |        |     |        outputColumnNames:["_col1"]
+                                    |        |     |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     |        Filter Operator [FIL_101]
+                                    |        |     |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                    |        |     |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     |           TableScan [TS_14]
+                                    |        |     |              alias:src1
+                                    |        |     |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                    |        |     |<-Reducer 9 [SIMPLE_EDGE]
+                                    |        |        Reduce Output Operator [RS_40]
+                                    |        |           key expressions:_col3 (type: string)
+                                    |        |           Map-reduce partition columns:_col3 (type: string)
+                                    |        |           sort order:+
+                                    |        |           Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           value expressions:_col2 (type: string), _col4 (type: string), _col6 (type: string)
+                                    |        |           Merge Join Operator [MERGEJOIN_106]
+                                    |        |           |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |        |           |  keys:{"0":"_col2 (type: string)","1":"_col0 (type: string)"}
+                                    |        |           |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                    |        |           |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           |<-Map 13 [SIMPLE_EDGE]
+                                    |        |           |  Reduce Output Operator [RS_37]
+                                    |        |           |     key expressions:_col0 (type: string)
+                                    |        |           |     Map-reduce partition columns:_col0 (type: string)
+                                    |        |           |     sort order:+
+                                    |        |           |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           |     Select Operator [SEL_13]
+                                    |        |           |        outputColumnNames:["_col0"]
+                                    |        |           |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           |        Filter Operator [FIL_100]
+                                    |        |           |           predicate:((value = 'd1value') and key is not null) (type: boolean)
+                                    |        |           |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           |           TableScan [TS_11]
+                                    |        |           |              alias:d1
+                                    |        |           |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |        |           |<-Reducer 8 [SIMPLE_EDGE]
+                                    |        |              Reduce Output Operator [RS_35]
+                                    |        |                 key expressions:_col2 (type: string)
+                                    |        |                 Map-reduce partition columns:_col2 (type: string)
+                                    |        |                 sort order:+
+                                    |        |                 Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 value expressions:_col3 (type: string), _col4 (type: string), _col6 (type: string)
+                                    |        |                 Merge Join Operator [MERGEJOIN_105]
+                                    |        |                 |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |        |                 |  keys:{"0":"_col1 (type: string)","1":"_col3 (type: string)"}
+                                    |        |                 |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                    |        |                 |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 |<-Map 12 [SIMPLE_EDGE]
+                                    |        |                 |  Reduce Output Operator [RS_32]
+                                    |        |                 |     key expressions:_col3 (type: string)
+                                    |        |                 |     Map-reduce partition columns:_col3 (type: string)
+                                    |        |                 |     sort order:+
+                                    |        |                 |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
+                                    |        |                 |     Select Operator [SEL_10]
+                                    |        |                 |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+                                    |        |                 |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 |        Filter Operator [FIL_99]
+                                    |        |                 |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
+                                    |        |                 |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 |           TableScan [TS_8]
+                                    |        |                 |              alias:ss
+                                    |        |                 |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                 |<-Map 7 [SIMPLE_EDGE]
+                                    |        |                    Reduce Output Operator [RS_30]
+                                    |        |                       key expressions:_col1 (type: string)
+                                    |        |                       Map-reduce partition columns:_col1 (type: string)
+                                    |        |                       sort order:+
+                                    |        |                       Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                       Select Operator [SEL_7]
+                                    |        |                          outputColumnNames:["_col1"]
+                                    |        |                          Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                          Filter Operator [FIL_98]
+                                    |        |                             predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                    |        |                             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |        |                             TableScan [TS_5]
+                                    |        |                                alias:srcpart
+                                    |        |                                Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                                    |        |<-Reducer 16 [SIMPLE_EDGE]
+                                    |           Reduce Output Operator [RS_47]
+                                    |              key expressions:_col2 (type: string), _col4 (type: string)
+                                    |              Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                    |              sort order:++
+                                    |              Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                    |              value expressions:_col3 (type: string), _col5 (type: string)
+                                    |              Merge Join Operator [MERGEJOIN_108]
+                                    |              |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |              |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                    |              |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                    |              |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                    |              |<-Map 15 [SIMPLE_EDGE]
+                                    |              |  Reduce Output Operator [RS_24]
+                                    |              |     key expressions:_col0 (type: string)
+                                    |              |     Map-reduce partition columns:_col0 (type: string)
+                                    |              |     sort order:+
+                                    |              |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |              |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                    |              |     Select Operator [SEL_19]
+                                    |              |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                    |              |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |              |        Filter Operator [FIL_102]
+                                    |              |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                    |              |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                    |              |           TableScan [TS_17]
+                                    |              |              alias:sr
+                                    |              |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                    |              |<-Map 17 [SIMPLE_EDGE]
+                                    |                 Reduce Output Operator [RS_26]
+                                    |                    key expressions:_col0 (type: string)
+                                    |                    Map-reduce partition columns:_col0 (type: string)
+                                    |                    sort order:+
+                                    |                    Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |                    Select Operator [SEL_22]
+                                    |                       outputColumnNames:["_col0"]
+                                    |                       Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |                       Filter Operator [FIL_103]
+                                    |                          predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                    |                          Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |                          TableScan [TS_20]
+                                    |                             alias:d1
+                                    |                             Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |<-Reducer 2 [SIMPLE_EDGE]
+                                       Reduce Output Operator [RS_56]
+                                          key expressions:_col1 (type: string), _col3 (type: string)
+                                          Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
+                                          sort order:++
+                                          Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          value expressions:_col2 (type: string)
+                                          Merge Join Operator [MERGEJOIN_104]
+                                          |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                          |  outputColumnNames:["_col1","_col2","_col3"]
+                                          |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map 1 [SIMPLE_EDGE]
+                                          |  Reduce Output Operator [RS_51]
+                                          |     key expressions:_col0 (type: string)
+                                          |     Map-reduce partition columns:_col0 (type: string)
+                                          |     sort order:+
+                                          |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                          |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                          |     Select Operator [SEL_1]
+                                          |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                          |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                          |        Filter Operator [FIL_96]
+                                          |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                          |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                          |           TableScan [TS_0]
+                                          |              alias:cs
+                                          |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map 6 [SIMPLE_EDGE]
+                                             Reduce Output Operator [RS_53]
+                                                key expressions:_col0 (type: string)
+                                                Map-reduce partition columns:_col0 (type: string)
+                                                sort order:+
                                                 Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                                Filter Operator [FIL_98]
-                                                   predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                                Select Operator [SEL_4]
+                                                   outputColumnNames:["_col0"]
                                                    Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                                   TableScan [TS_2]
-                                                      alias:d1
-                                                      Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                   Filter Operator [FIL_97]
+                                                      predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                                      Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                      TableScan [TS_2]
+                                                         alias:d1
+                                                         Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
 
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value
@@ -1522,199 +1525,202 @@ Stage-0
       limit:100
       Stage-1
          Reducer 5
-         File Output Operator [FS_69]
+         File Output Operator [FS_68]
             compressed:false
             Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Limit [LIM_68]
+            Limit [LIM_67]
                Number of rows:100
                Statistics:Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-               Select Operator [SEL_67]
+               Select Operator [SEL_66]
                |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                |<-Reducer 4 [SIMPLE_EDGE]
-                  Reduce Output Operator [RS_66]
+                  Reduce Output Operator [RS_65]
                      key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
                      sort order:+++
                      Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
                      value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
-                     Group By Operator [GBY_64]
-                     |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
-                     |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
-                     |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                     |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
-                     |<-Map 3 [SIMPLE_EDGE]
-                        Reduce Output Operator [RS_63]
-                           key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                           Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                           sort order:+++
-                           Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                           value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
-                           Group By Operator [GBY_62]
-                              aggregations:["count(_col3)","count(_col4)","count(_col5)"]
-                              keys:_col0 (type: string), _col1 (type: string), _col2 (type: string)
-                              outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                     Select Operator [SEL_64]
+                        outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                        Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                        Group By Operator [GBY_63]
+                        |  aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"]
+                        |  keys:KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
+                        |  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
+                        |  Statistics:Num rows: 402 Data size: 4276 Basic stats: COMPLETE Column stats: NONE
+                        |<-Map 3 [SIMPLE_EDGE]
+                           Reduce Output Operator [RS_62]
+                              key expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+                              Map-reduce partition columns:_col0 (type: string), _col1 (type: string), _col2 (type: string)
+                              sort order:+++
                               Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                              Select Operator [SEL_60]
+                              value expressions:_col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint)
+                              Group By Operator [GBY_61]
+                                 aggregations:["count(_col13)","count(_col21)","count(_col3)"]
+                                 keys:_col2 (type: string), _col12 (type: string), _col20 (type: string)
                                  outputColumnNames:["_col0","_col1","_col2","_col3","_col4","_col5"]
                                  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                                 Map Join Operator [MAPJOIN_111]
-                                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"}
-                                 |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
-                                 |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Map 2 [BROADCAST_EDGE]
-                                 |  Reduce Output Operator [RS_56]
-                                 |     key expressions:_col1 (type: string), _col3 (type: string)
-                                 |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
-                                 |     sort order:++
-                                 |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     value expressions:_col2 (type: string)
-                                 |     Map Join Operator [MAPJOIN_105]
-                                 |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |     |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
-                                 |     |  outputColumnNames:["_col1","_col2","_col3"]
-                                 |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Map 1 [BROADCAST_EDGE]
-                                 |     |  Reduce Output Operator [RS_51]
-                                 |     |     key expressions:_col0 (type: string)
-                                 |     |     Map-reduce partition columns:_col0 (type: string)
-                                 |     |     sort order:+
-                                 |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
-                                 |     |     Select Operator [SEL_1]
-                                 |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                 |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |     |        Filter Operator [FIL_97]
-                                 |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
-                                 |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
-                                 |     |           TableScan [TS_0]
-                                 |     |              alias:cs
-                                 |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
-                                 |     |<-Select Operator [SEL_4]
-                                 |           outputColumnNames:["_col0"]
-                                 |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |           Filter Operator [FIL_98]
-                                 |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                 |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                 |              TableScan [TS_2]
-                                 |                 alias:d1
-                                 |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                 |<-Select Operator [SEL_49]
-                                       outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
-                                       Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                       Map Join Operator [MAPJOIN_110]
-                                       |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |  keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"}
-                                       |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
-                                       |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map 10 [BROADCAST_EDGE]
-                                       |  Reduce Output Operator [RS_47]
-                                       |     key expressions:_col2 (type: string), _col4 (type: string)
-                                       |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
-                                       |     sort order:++
-                                       |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     value expressions:_col3 (type: string), _col5 (type: string)
-                                       |     Map Join Operator [MAPJOIN_109]
-                                       |     |  condition map:[{"":"Inner Join 0 to 1"}]
-                                       |     |  keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
-                                       |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
-                                       |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Map 9 [BROADCAST_EDGE]
-                                       |     |  Reduce Output Operator [RS_24]
-                                       |     |     key expressions:_col0 (type: string)
-                                       |     |     Map-reduce partition columns:_col0 (type: string)
-                                       |     |     sort order:+
-                                       |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                       |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
-                                       |     |     Select Operator [SEL_19]
-                                       |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
-                                       |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                       |     |        Filter Operator [FIL_103]
-                                       |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
-                                       |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                       |     |           TableScan [TS_17]
-                                       |     |              alias:sr
-                                       |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                       |     |<-Select Operator [SEL_22]
-                                       |           outputColumnNames:["_col0"]
-                                       |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |           Filter Operator [FIL_104]
-                                       |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
-                                       |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                       |              TableScan [TS_20]
-                                       |                 alias:d1
-                                       |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                       |<-Map Join Operator [MAPJOIN_108]
+                                 Select Operator [SEL_60]
+                                    outputColumnNames:["_col2","_col12","_col20","_col13","_col21","_col3"]
+                                    Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                    Map Join Operator [MAPJOIN_110]
+                                    |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |  keys:{"Map 2":"_col1 (type: string), _col3 (type: string)","Map 3":"_col15 (type: string), _col17 (type: string)"}
+                                    |  outputColumnNames:["_col2","_col3","_col12","_col13","_col20","_col21"]
+                                    |  Statistics:Num rows: 804 Data size: 8552 Basic stats: COMPLETE Column stats: NONE
+                                    |<-Map 2 [BROADCAST_EDGE]
+                                    |  Reduce Output Operator [RS_56]
+                                    |     key expressions:_col1 (type: string), _col3 (type: string)
+                                    |     Map-reduce partition columns:_col1 (type: string), _col3 (type: string)
+                                    |     sort order:++
+                                    |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                    |     value expressions:_col2 (type: string)
+                                    |     Map Join Operator [MAPJOIN_104]
+                                    |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |     |  keys:{"Map 1":"_col0 (type: string)","Map 2":"_col0 (type: string)"}
+                                    |     |  outputColumnNames:["_col1","_col2","_col3"]
+                                    |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                    |     |<-Map 1 [BROADCAST_EDGE]
+                                    |     |  Reduce Output Operator [RS_51]
+                                    |     |     key expressions:_col0 (type: string)
+                                    |     |     Map-reduce partition columns:_col0 (type: string)
+                                    |     |     sort order:+
+                                    |     |     Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                    |     |     value expressions:_col1 (type: string), _col2 (type: string), _col3 (type: string)
+                                    |     |     Select Operator [SEL_1]
+                                    |     |        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                    |     |        Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                    |     |        Filter Operator [FIL_96]
+                                    |     |           predicate:((k1 is not null and v2 is not null) and v3 is not null) (type: boolean)
+                                    |     |           Statistics:Num rows: 22 Data size: 762 Basic stats: COMPLETE Column stats: NONE
+                                    |     |           TableScan [TS_0]
+                                    |     |              alias:cs
+                                    |     |              Statistics:Num rows: 170 Data size: 5890 Basic stats: COMPLETE Column stats: NONE
+                                    |     |<-Select Operator [SEL_4]
+                                    |           outputColumnNames:["_col0"]
+                                    |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |           Filter Operator [FIL_97]
+                                    |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                    |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                    |              TableScan [TS_2]
+                                    |                 alias:d1
+                                    |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                    |<-Select Operator [SEL_49]
+                                          outputColumnNames:["_col14","_col15","_col17","_col6","_col7"]
+                                          Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          Map Join Operator [MAPJOIN_109]
                                           |  condition map:[{"":"Inner Join 0 to 1"}]
-                                          |  keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"}
-                                          |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                          |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
-                                          |<-Map 8 [BROADCAST_EDGE]
-                                          |  Reduce Output Operator [RS_42]
-                                          |     key expressions:_col1 (type: string)
-                                          |     Map-reduce partition columns:_col1 (type: string)
-                                          |     sort order:+
-                                          |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                          |     Select Operator [SEL_16]
-                                          |        outputColumnNames:["_col1"]
-                                          |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                          |        Filter Operator [FIL_102]
-                                          |           predicate:((key = 'src1key') and value is not null) (type: boolean)
-                                          |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
-                                          |           TableScan [TS_14]
-                                          |              alias:src1
-                                          |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
+                                          |  keys:{"Map 3":"_col4 (type: string), _col6 (type: string)","Map 10":"_col2 (type: string), _col4 (type: string)"}
+                                          |  outputColumnNames:["_col2","_col3","_col14","_col15","_col17"]
+                                          |  Statistics:Num rows: 731 Data size: 7775 Basic stats: COMPLETE Column stats: NONE
+                                          |<-Map 10 [BROADCAST_EDGE]
+                                          |  Reduce Output Operator [RS_47]
+                                          |     key expressions:_col2 (type: string), _col4 (type: string)
+                                          |     Map-reduce partition columns:_col2 (type: string), _col4 (type: string)
+                                          |     sort order:++
+                                          |     Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     value expressions:_col3 (type: string), _col5 (type: string)
+                                          |     Map Join Operator [MAPJOIN_108]
+                                          |     |  condition map:[{"":"Inner Join 0 to 1"}]
+                                          |     |  keys:{"Map 9":"_col0 (type: string)","Map 10":"_col0 (type: string)"}
+                                          |     |  outputColumnNames:["_col2","_col3","_col4","_col5"]
+                                          |     |  Statistics:Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Map 9 [BROADCAST_EDGE]
+                                          |     |  Reduce Output Operator [RS_24]
+                                          |     |     key expressions:_col0 (type: string)
+                                          |     |     Map-reduce partition columns:_col0 (type: string)
+                                          |     |     sort order:+
+                                          |     |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |     value expressions:_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
+                                          |     |     Select Operator [SEL_19]
+                                          |     |        outputColumnNames:["_col0","_col2","_col3","_col4","_col5"]
+                                          |     |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |        Filter Operator [FIL_102]
+                                          |     |           predicate:((((((v1 = 'srv1') and k1 is not null) and k2 is not null) and k3 is not null) and v2 is not null) and v3 is not null) (type: boolean)
+                                          |     |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                          |     |           TableScan [TS_17]
+                                          |     |              alias:sr
+                                          |     |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                          |     |<-Select Operator [SEL_22]
+                                          |           outputColumnNames:["_col0"]
+                                          |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |           Filter Operator [FIL_103]
+                                          |              predicate:((value) IN ('2000Q1', '2000Q2', '2000Q3') and key is not null) (type: boolean)
+                                          |              Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                          |              TableScan [TS_20]
+                                          |                 alias:d1
+                                          |                 Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                                           |<-Map Join Operator [MAPJOIN_107]
                                              |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"}
+                                             |  keys:{"Map 3":"_col3 (type: string)","Map 8":"_col1 (type: string)"}
                                              |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                             |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
-                                             |<-Map 7 [BROADCAST_EDGE]
-                                             |  Reduce Output Operator [RS_37]
-                                             |     key expressions:_col0 (type: string)
-                                             |     Map-reduce partition columns:_col0 (type: string)
+                                             |  Statistics:Num rows: 665 Data size: 7069 Basic stats: COMPLETE Column stats: NONE
+                                             |<-Map 8 [BROADCAST_EDGE]
+                                             |  Reduce Output Operator [RS_42]
+                                             |     key expressions:_col1 (type: string)
+                                             |     Map-reduce partition columns:_col1 (type: string)
                                              |     sort order:+
-                                             |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |     Select Operator [SEL_13]
-                                             |        outputColumnNames:["_col0"]
-                                             |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                             |     Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                             |     Select Operator [SEL_16]
+                                             |        outputColumnNames:["_col1"]
+                                             |        Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
                                              |        Filter Operator [FIL_101]
-                                             |           predicate:((value = 'd1value') and key is not null) (type: boolean)
-                                             |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
-                                             |           TableScan [TS_11]
-                                             |              alias:d1
-                                             |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                             |           predicate:((key = 'src1key') and value is not null) (type: boolean)
+                                             |           Statistics:Num rows: 6 Data size: 45 Basic stats: COMPLETE Column stats: NONE
+                                             |           TableScan [TS_14]
+                                             |              alias:src1
+                                             |              Statistics:Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
                                              |<-Map Join Operator [MAPJOIN_106]
                                                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                                |  keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"}
+                                                |  keys:{"Map 3":"_col2 (type: string)","Map 7":"_col0 (type: string)"}
                                                 |  outputColumnNames:["_col2","_col3","_col4","_col6"]
-                                                |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
-                                                |<-Map 6 [BROADCAST_EDGE]
-                                                |  Reduce Output Operator [RS_32]
-                                                |     key expressions:_col3 (type: string)
-                                                |     Map-reduce partition columns:_col3 (type: string)
+                                                |  Statistics:Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Map 7 [BROADCAST_EDGE]
+                                                |  Reduce Output Operator [RS_37]
+                                                |     key expressions:_col0 (type: string)
+                                                |     Map-reduce partition columns:_col0 (type: string)
                                                 |     sort order:+
-                                                |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
-                                                |     Select Operator [SEL_10]
-                                                |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
-                                                |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                |     Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                |     Select Operator [SEL_13]
+                                                |        outputColumnNames:["_col0"]
+                                                |        Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                                                 |        Filter Operator [FIL_100]
-                                                |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
-                                                |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
-                                                |           TableScan [TS_8]
-                                                |              alias:ss
-                                                |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
-                                                |<-Select Operator [SEL_7]
-                                                      outputColumnNames:["_col1"]
-                                                      Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                                      Filter Operator [FIL_99]
-                                                         predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                                |           predicate:((value = 'd1value') and key is not null) (type: boolean)
+                                                |           Statistics:Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
+                                                |           TableScan [TS_11]
+                                                |              alias:d1
+                                                |              Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                |<-Map Join Operator [MAPJOIN_105]
+                                                   |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                   |  keys:{"Map 3":"_col1 (type: string)","Map 6":"_col3 (type: string)"}
+                                                   |  outputColumnNames:["_col2","_col3","_col4","_col6"]
+                                                   |  Statistics:Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
+                                                   |<-Map 6 [BROADCAST_EDGE]
+                                                   |  Reduce Output Operator [RS_32]
+                                                   |     key expressions:_col3 (type: string)
+                                                   |     Map-reduce partition columns:_col3 (type: string)
+                                                   |     sort order:+
+                                                   |     Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                   |     value expressions:_col0 (type: string), _col1 (type: string), _col2 (type: string), _col4 (type: string)
+                                                   |     Select Operator [SEL_10]
+                                                   |        outputColumnNames:["_col0","_col1","_col2","_col3","_col4"]
+                                                   |        Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                   |        Filter Operator [FIL_99]
+                                                   |           predicate:((((((v3 = 'ssv3') and v2 is not null) and k1 is not null) and v1 is not null) and k2 is not null) and k3 is not null) (type: boolean)
+                                                   |           Statistics:Num rows: 2 Data size: 69 Basic stats: COMPLETE Column stats: NONE
+                                                   |           TableScan [TS_8]
+                                                   |              alias:ss
+                                                   |              Statistics:Num rows: 85 Data size: 2945 Basic stats: COMPLETE Column stats: NONE
+                                                   |<-Select Operator [SEL_7]
+                                                         outputColumnNames:["_col1"]
                                                          Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                                                         TableScan [TS_5]
-                                                            alias:srcpart
-                                                            Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                                                         Filter Operator [FIL_98]
+                                                            predicate:((key = 'srcpartkey') and value is not null) (type: boolean)
+                                                            Statistics:Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                                                            TableScan [TS_5]
+                                                               alias:srcpart
+                                                               Statistics:Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
 
 PREHOOK: query: explain
 SELECT x.key, z.value, y.value


[05/41] hive git commit: HIVE-11814: Emit query time in lineage info (Jimmy, reviewed by Ashutosh)

Posted by se...@apache.org.
HIVE-11814: Emit query time in lineage info (Jimmy, reviewed by Ashutosh)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/07ca8120
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/07ca8120
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/07ca8120

Branch: refs/heads/llap
Commit: 07ca81202c9b0138d562fa863cbd60073789bf12
Parents: cabd481
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Mon Sep 14 09:25:04 2015 -0700
Committer: Jimmy Xiang <jx...@cloudera.com>
Committed: Tue Sep 15 09:14:38 2015 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/07ca8120/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index 3c6ce94..f615d81 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -149,8 +149,10 @@ public class LineageLogger implements ExecuteWithHookContext {
           // so that the test golden output file is fixed.
           long queryTime = plan.getQueryStartTime().longValue();
           if (queryTime == 0) queryTime = System.currentTimeMillis();
+          long duration = System.currentTimeMillis() - queryTime;
           writer.name("user").value(hookContext.getUgi().getUserName());
           writer.name("timestamp").value(queryTime/1000);
+          writer.name("duration").value(duration);
           writer.name("jobIds");
           writer.beginArray();
           List<TaskRunner> tasks = hookContext.getCompleteTaskList();
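
The two added lines compute the elapsed wall-clock time from the query start and write it next to the existing timestamp field. Below is a minimal, self-contained sketch of that bookkeeping (not the hook itself); it assumes Gson's JsonWriter, and the names queryStartMillis and buffer are illustrative only.

import java.io.StringWriter;
import com.google.gson.stream.JsonWriter;

public class DurationSketch {
  public static void main(String[] args) throws Exception {
    // Pretend the query began 1.5 seconds ago; the hook reads this from the QueryPlan.
    long queryStartMillis = System.currentTimeMillis() - 1500L;
    long duration = System.currentTimeMillis() - queryStartMillis;

    StringWriter buffer = new StringWriter();
    JsonWriter writer = new JsonWriter(buffer);
    writer.beginObject();
    writer.name("timestamp").value(queryStartMillis / 1000); // seconds, as in the hook
    writer.name("duration").value(duration);                 // milliseconds since query start
    writer.endObject();
    writer.close();
    System.out.println(buffer); // prints something like {"timestamp":...,"duration":1500}
  }
}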


[26/41] hive git commit: HIVE-8327: (repeat) mvn site -Pfindbugs for hive (Gopal V reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-8327: (repeat) mvn site -Pfindbugs for hive (Gopal V reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ce71355c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ce71355c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ce71355c

Branch: refs/heads/llap
Commit: ce71355caf6dc7c693e4b8c1b55f6083b0e77485
Parents: 1cce5f0
Author: Gopal V <go...@apache.org>
Authored: Wed Sep 16 10:43:22 2015 -0700
Committer: Gopal V <go...@apache.org>
Committed: Wed Sep 16 10:43:29 2015 -0700

----------------------------------------------------------------------
 pom.xml | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ce71355c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index b55e86a..342224d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1040,6 +1040,26 @@
         </plugins>
       </build>
     </profile>
+
+    <profile>
+    <id>findbugs</id>
+      <reporting>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>findbugs-maven-plugin</artifactId>
+            <version>3.0.0</version>
+            <configuration>
+              <fork>true</fork>
+              <maxHeap>2048</maxHeap>
+              <jvmArgs>-Djava.awt.headless=true -Xmx2048m -Xms512m</jvmArgs>
+              <excludeFilterFile>${project.parent.basedir}/findbugs/findbugs-exclude.xml</excludeFilterFile>
+            </configuration>
+          </plugin>
+        </plugins>
+      </reporting>
+    </profile>
+
     <!-- hadoop profiles in the root pom are only used for dependency management -->
     <profile>
       <id>hadoop-1</id>


[38/41] hive git commit: HIVE-11846: CliDriver shutdown tries to drop index table again which was already dropped when dropping the original table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-11846: CliDriver shutdown tries to drop index table again which was already dropped when dropping the original table (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/68c0e999
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/68c0e999
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/68c0e999

Branch: refs/heads/llap
Commit: 68c0e9993c6aee85d50ce1dc8974916b1e073f67
Parents: b934a80
Author: Pengcheng Xiong <px...@apache.org>
Authored: Fri Sep 18 10:29:52 2015 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Fri Sep 18 10:29:52 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/QTestUtil.java    |   9 +-
 .../clientpositive/drop_table_with_index.q      |  35 +++++
 .../clientpositive/drop_table_with_index.q.out  | 152 +++++++++++++++++++
 3 files changed, 195 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 3fae0ba..f23bf2b 100644
--- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.CuratorFrameworkSingleton;
 import org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager;
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
@@ -670,7 +671,13 @@ public class QTestUtil {
       SessionState.get().setCurrentDatabase(dbName);
       for (String tblName : db.getAllTables()) {
         if (!DEFAULT_DATABASE_NAME.equals(dbName) || !srcTables.contains(tblName)) {
-          Table tblObj = db.getTable(tblName);
+          Table tblObj = null;
+          try {
+            tblObj = db.getTable(tblName);
+          } catch (InvalidTableException e) {
+            LOG.warn("Trying to drop table " + e.getTableName() + ". But it does not exist.");
+            continue;
+          }
           // an index table can not be dropped directly. Dropping the base
           // table will automatically drop all its index tables
           if(tblObj.isIndexTable()) {
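
The hunk above makes the test cleanup tolerant of tables that have already disappeared: dropping a base table implicitly drops its index tables, so a later lookup of the index table throws InvalidTableException and should simply be skipped. A hedged sketch of that pattern follows. It uses only the calls visible in the patch (getAllTables, getTable, isIndexTable, getTableName); the helper name dropLeftoverTables and the srcTables set are illustrative, and the actual drop call is elided because its exact signature is not shown here.

import java.util.Set;

import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
import org.apache.hadoop.hive.ql.metadata.Table;

class TestCleanupSketch {
  static void dropLeftoverTables(Hive db, Set<String> srcTables) throws Exception {
    for (String tblName : db.getAllTables()) {
      if (srcTables.contains(tblName)) {
        continue;                       // keep the shared source tables
      }
      Table tblObj;
      try {
        tblObj = db.getTable(tblName);
      } catch (InvalidTableException e) {
        // Already gone: e.g. an index table that vanished when its base table was dropped.
        System.out.println("Skipping missing table " + e.getTableName());
        continue;
      }
      if (tblObj.isIndexTable()) {
        continue;                       // dropped implicitly with its base table
      }
      // ... drop tblObj here ...
    }
  }
}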

http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/ql/src/test/queries/clientpositive/drop_table_with_index.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/drop_table_with_index.q b/ql/src/test/queries/clientpositive/drop_table_with_index.q
new file mode 100644
index 0000000..1790664
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/drop_table_with_index.q
@@ -0,0 +1,35 @@
+set hive.stats.dbclass=fs;
+set hive.stats.autogather=true;
+set hive.cbo.enable=true;
+
+DROP TABLE IF EXISTS aa;
+CREATE TABLE aa (L_ORDERKEY      INT,
+                                L_PARTKEY       INT,
+                                L_SUPPKEY       INT,
+                                L_LINENUMBER    INT,
+                                L_QUANTITY      DOUBLE,
+                                L_EXTENDEDPRICE DOUBLE,
+                                L_DISCOUNT      DOUBLE,
+                                L_TAX           DOUBLE,
+                                L_RETURNFLAG    STRING,
+                                L_LINESTATUS    STRING,
+                                l_shipdate      STRING,
+                                L_COMMITDATE    STRING,
+                                L_RECEIPTDATE   STRING,
+                                L_SHIPINSTRUCT  STRING,
+                                L_SHIPMODE      STRING,
+                                L_COMMENT       STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|';
+
+LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa;
+
+CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)");
+ALTER INDEX aa_lshipdate_idx ON aa REBUILD;
+
+show tables;
+
+explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/68c0e999/ql/src/test/results/clientpositive/drop_table_with_index.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/drop_table_with_index.q.out b/ql/src/test/results/clientpositive/drop_table_with_index.q.out
new file mode 100644
index 0000000..d1b0d6d
--- /dev/null
+++ b/ql/src/test/results/clientpositive/drop_table_with_index.q.out
@@ -0,0 +1,152 @@
+PREHOOK: query: DROP TABLE IF EXISTS aa
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS aa
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE aa (L_ORDERKEY      INT,
+                                L_PARTKEY       INT,
+                                L_SUPPKEY       INT,
+                                L_LINENUMBER    INT,
+                                L_QUANTITY      DOUBLE,
+                                L_EXTENDEDPRICE DOUBLE,
+                                L_DISCOUNT      DOUBLE,
+                                L_TAX           DOUBLE,
+                                L_RETURNFLAG    STRING,
+                                L_LINESTATUS    STRING,
+                                l_shipdate      STRING,
+                                L_COMMITDATE    STRING,
+                                L_RECEIPTDATE   STRING,
+                                L_SHIPINSTRUCT  STRING,
+                                L_SHIPMODE      STRING,
+                                L_COMMENT       STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@aa
+POSTHOOK: query: CREATE TABLE aa (L_ORDERKEY      INT,
+                                L_PARTKEY       INT,
+                                L_SUPPKEY       INT,
+                                L_LINENUMBER    INT,
+                                L_QUANTITY      DOUBLE,
+                                L_EXTENDEDPRICE DOUBLE,
+                                L_DISCOUNT      DOUBLE,
+                                L_TAX           DOUBLE,
+                                L_RETURNFLAG    STRING,
+                                L_LINESTATUS    STRING,
+                                l_shipdate      STRING,
+                                L_COMMITDATE    STRING,
+                                L_RECEIPTDATE   STRING,
+                                L_SHIPINSTRUCT  STRING,
+                                L_SHIPMODE      STRING,
+                                L_COMMENT       STRING)
+ROW FORMAT DELIMITED
+FIELDS TERMINATED BY '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@aa
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@aa
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE aa
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@aa
+PREHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
+PREHOOK: type: CREATEINDEX
+PREHOOK: Input: default@aa
+POSTHOOK: query: CREATE INDEX aa_lshipdate_idx ON TABLE aa(l_shipdate) AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler' WITH DEFERRED REBUILD IDXPROPERTIES("AGGREGATES"="count(l_shipdate)")
+POSTHOOK: type: CREATEINDEX
+POSTHOOK: Input: default@aa
+POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
+PREHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
+PREHOOK: type: ALTERINDEX_REBUILD
+PREHOOK: Input: default@aa
+PREHOOK: Output: default@default__aa_aa_lshipdate_idx__
+POSTHOOK: query: ALTER INDEX aa_lshipdate_idx ON aa REBUILD
+POSTHOOK: type: ALTERINDEX_REBUILD
+POSTHOOK: Input: default@aa
+POSTHOOK: Output: default@default__aa_aa_lshipdate_idx__
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._bucketname SIMPLE [(aa)aa.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._count_of_l_shipdate EXPRESSION [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__._offsets EXPRESSION [(aa)aa.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: default__aa_aa_lshipdate_idx__.l_shipdate SIMPLE [(aa)aa.FieldSchema(name:l_shipdate, type:string, comment:null), ]
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+aa
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+default__aa_aa_lshipdate_idx__
+lineitem
+part
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select l_shipdate, count(l_shipdate)
+from aa
+group by l_shipdate
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: aa
+            Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: l_shipdate (type: string)
+              outputColumnNames: l_shipdate
+              Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(l_shipdate)
+                keys: l_shipdate (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: string)
+                  Statistics: Num rows: 120 Data size: 12099 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col1 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 60 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 60 Data size: 6049 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+


[35/41] hive git commit: HIVE-11834: Lineage doesn't work with dynamic partitioning query (Jimmy, reviewed by Szehon)

Posted by se...@apache.org.
HIVE-11834: Lineage doesn't work with dynamic partitioning query (Jimmy, reviewed by Szehon)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2278548e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2278548e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2278548e

Branch: refs/heads/llap
Commit: 2278548e41bf7b51e7b604fe9c91905b1ca198f1
Parents: 3cf7bd9
Author: Jimmy Xiang <jx...@cloudera.com>
Authored: Wed Sep 16 08:09:41 2015 -0700
Committer: Jimmy Xiang <jx...@cloudera.com>
Committed: Fri Sep 18 07:19:23 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/hooks/LineageLogger.java     | 93 +++++++++++---------
 .../hive/ql/optimizer/lineage/LineageCtx.java   |  8 +-
 ql/src/test/queries/clientpositive/lineage3.q   | 26 ++++++
 .../test/results/clientpositive/lineage3.q.out  | 68 +++++++++++++-
 4 files changed, 153 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2278548e/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
index f615d81..9988c79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
@@ -215,8 +215,7 @@ public class LineageLogger implements ExecuteWithHookContext {
   private List<Edge> getEdges(QueryPlan plan, Index index) {
     LinkedHashMap<String, ObjectPair<SelectOperator,
       org.apache.hadoop.hive.ql.metadata.Table>> finalSelOps = index.getFinalSelectOps();
-    Set<Vertex> allTargets = new LinkedHashSet<Vertex>();
-    Map<String, Vertex> allSources = new LinkedHashMap<String, Vertex>();
+    Map<String, Vertex> vertexCache = new LinkedHashMap<String, Vertex>();
     List<Edge> edges = new ArrayList<Edge>();
     for (ObjectPair<SelectOperator,
         org.apache.hadoop.hive.ql.metadata.Table> pair: finalSelOps.values()) {
@@ -244,41 +243,46 @@ public class LineageLogger implements ExecuteWithHookContext {
           }
         }
       }
-      int fields = fieldSchemas.size();
       Map<ColumnInfo, Dependency> colMap = index.getDependencies(finalSelOp);
       List<Dependency> dependencies = colMap != null ? Lists.newArrayList(colMap.values()) : null;
+      int fields = fieldSchemas.size();
+      if (t != null && colMap != null && fields < colMap.size()) {
+        // Dynamic partition keys should be added to field schemas.
+        List<FieldSchema> partitionKeys = t.getPartitionKeys();
+        int dynamicKeyCount = colMap.size() - fields;
+        int keyOffset = partitionKeys.size() - dynamicKeyCount;
+        if (keyOffset >= 0) {
+          fields += dynamicKeyCount;
+          for (int i = 0; i < dynamicKeyCount; i++) {
+            FieldSchema field = partitionKeys.get(keyOffset + i);
+            fieldSchemas.add(field);
+            if (colNames != null) {
+              colNames.add(field.getName());
+            }
+          }
+        }
+      }
       if (dependencies == null || dependencies.size() != fields) {
         log("Result schema has " + fields
           + " fields, but we don't get as many dependencies");
       } else {
         // Go through each target column, generate the lineage edges.
+        Set<Vertex> targets = new LinkedHashSet<Vertex>();
         for (int i = 0; i < fields; i++) {
-          Vertex target = new Vertex(
-            getTargetFieldName(i, destTableName, colNames, fieldSchemas));
-          allTargets.add(target);
+          Vertex target = getOrCreateVertex(vertexCache,
+            getTargetFieldName(i, destTableName, colNames, fieldSchemas),
+            Vertex.Type.COLUMN);
+          targets.add(target);
           Dependency dep = dependencies.get(i);
-          String expr = dep.getExpr();
-          Set<Vertex> sources = createSourceVertices(allSources, dep.getBaseCols());
-          Edge edge = findSimilarEdgeBySources(edges, sources, expr, Edge.Type.PROJECTION);
-          if (edge == null) {
-            Set<Vertex> targets = new LinkedHashSet<Vertex>();
-            targets.add(target);
-            edges.add(new Edge(sources, targets, expr, Edge.Type.PROJECTION));
-          } else {
-            edge.targets.add(target);
-          }
+          addEdge(vertexCache, edges, dep.getBaseCols(), target,
+            dep.getExpr(), Edge.Type.PROJECTION);
         }
         Set<Predicate> conds = index.getPredicates(finalSelOp);
         if (conds != null && !conds.isEmpty()) {
           for (Predicate cond: conds) {
-            String expr = cond.getExpr();
-            Set<Vertex> sources = createSourceVertices(allSources, cond.getBaseCols());
-            Edge edge = findSimilarEdgeByTargets(edges, allTargets, expr, Edge.Type.PREDICATE);
-            if (edge == null) {
-              edges.add(new Edge(sources, allTargets, expr, Edge.Type.PREDICATE));
-            } else {
-              edge.sources.addAll(sources);
-            }
+            addEdge(vertexCache, edges, cond.getBaseCols(),
+              new LinkedHashSet<Vertex>(targets), cond.getExpr(),
+              Edge.Type.PREDICATE);
           }
         }
       }
@@ -286,12 +290,35 @@ public class LineageLogger implements ExecuteWithHookContext {
     return edges;
   }
 
+  private void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
+      Set<BaseColumnInfo> srcCols, Vertex target, String expr, Edge.Type type) {
+    Set<Vertex> targets = new LinkedHashSet<Vertex>();
+    targets.add(target);
+    addEdge(vertexCache, edges, srcCols, targets, expr, type);
+  }
+
+  /**
+   * Find an edge among all edges that has the same source vertices.
+   * If found, add the new targets to this edge's target vertex list.
+   * Otherwise, create a new edge and add it to the edge list.
+   */
+  private void addEdge(Map<String, Vertex> vertexCache, List<Edge> edges,
+      Set<BaseColumnInfo> srcCols, Set<Vertex> targets, String expr, Edge.Type type) {
+    Set<Vertex> sources = createSourceVertices(vertexCache, srcCols);
+    Edge edge = findSimilarEdgeBySources(edges, sources, expr, type);
+    if (edge == null) {
+      edges.add(new Edge(sources, targets, expr, type));
+    } else {
+      edge.targets.addAll(targets);
+    }
+  }
+
   /**
    * Convert a list of columns to a set of vertices.
    * Use cached vertices if possible.
    */
   private Set<Vertex> createSourceVertices(
-      Map<String, Vertex> srcVertexCache, Collection<BaseColumnInfo> baseCols) {
+      Map<String, Vertex> vertexCache, Collection<BaseColumnInfo> baseCols) {
     Set<Vertex> sources = new LinkedHashSet<Vertex>();
     if (baseCols != null && !baseCols.isEmpty()) {
       for(BaseColumnInfo col: baseCols) {
@@ -308,7 +335,7 @@ public class LineageLogger implements ExecuteWithHookContext {
           type = Vertex.Type.COLUMN;
           label = tableName + "." + fieldSchema.getName();
         }
-        sources.add(getOrCreateVertex(srcVertexCache, label, type));
+        sources.add(getOrCreateVertex(vertexCache, label, type));
       }
     }
     return sources;
@@ -342,20 +369,6 @@ public class LineageLogger implements ExecuteWithHookContext {
   }
 
   /**
-   * Find an edge that has the same type, expression, and targets.
-   */
-  private Edge findSimilarEdgeByTargets(
-      List<Edge> edges, Set<Vertex> targets, String expr, Edge.Type type) {
-    for (Edge edge: edges) {
-      if (edge.type == type && StringUtils.equals(edge.expr, expr)
-          && SetUtils.isEqualSet(edge.targets, targets)) {
-        return edge;
-      }
-    }
-    return null;
-  }
-
-  /**
    * Generate normalized name for a given target column.
    */
   private String getTargetFieldName(int fieldIndex,
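
The LineageLogger changes above replace the separate allTargets/allSources bookkeeping with a single vertexCache keyed by label and a shared addEdge helper that merges into an existing edge with the same sources and expression instead of creating a duplicate. The sketch below illustrates that merge pattern with simplified stand-in Vertex and Edge classes; they are not the Hive types and only mirror the caching and de-duplication idea.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

class Vertex {
  final String label;
  Vertex(String label) { this.label = label; }
  @Override public boolean equals(Object o) { return o instanceof Vertex && ((Vertex) o).label.equals(label); }
  @Override public int hashCode() { return label.hashCode(); }
}

class Edge {
  final Set<Vertex> sources;
  final Set<Vertex> targets;
  final String expr;
  Edge(Set<Vertex> sources, Set<Vertex> targets, String expr) {
    this.sources = sources; this.targets = targets; this.expr = expr;
  }
}

class LineageSketch {
  private final Map<String, Vertex> vertexCache = new LinkedHashMap<>();
  private final List<Edge> edges = new ArrayList<>();

  Vertex getOrCreateVertex(String label) {
    return vertexCache.computeIfAbsent(label, Vertex::new); // one vertex per column label
  }

  void addEdge(Set<Vertex> sources, Vertex target, String expr) {
    for (Edge e : edges) {
      // Same sources and same expression: just attach another target column.
      if (e.sources.equals(sources) && Objects.equals(e.expr, expr)) {
        e.targets.add(target);
        return;
      }
    }
    Set<Vertex> targets = new LinkedHashSet<>();
    targets.add(target);
    edges.add(new Edge(sources, targets, expr));
  }
}

In the patch, getOrCreateVertex feeds both the target vertices and createSourceVertices, which is what lets projection and predicate edges share the same vertex objects.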

http://git-wip-us.apache.org/repos/asf/hive/blob/2278548e/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
index c33d775..2d8b9e3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.hive.ql.optimizer.lineage;
 
-import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
@@ -171,6 +171,12 @@ public class LineageCtx implements NodeProcessorCtx {
         conds = new LinkedHashSet<Predicate>();
         condMap.put(op, conds);
       }
+      for (Predicate p: conds) {
+        if (StringUtils.equals(cond.getExpr(), p.getExpr())) {
+          p.getBaseCols().addAll(cond.getBaseCols());
+          return;
+        }
+      }
       conds.add(cond);
     }
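
The LineageCtx change above applies the same idea on the predicate side: if a predicate with the same expression text is already recorded for the operator, only its base columns are merged instead of adding a duplicate entry. A tiny illustrative sketch, with Pred as a stand-in for the Hive predicate type:

import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

class PredicateDedupSketch {
  static final class Pred {
    final String expr;
    final Set<String> baseCols = new LinkedHashSet<>();
    Pred(String expr) { this.expr = expr; }
  }

  static void addPredicate(Set<Pred> conds, Pred cond) {
    for (Pred p : conds) {
      if (Objects.equals(p.expr, cond.expr)) {
        p.baseCols.addAll(cond.baseCols); // merge columns of the duplicate condition
        return;
      }
    }
    conds.add(cond);
  }
}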
 

http://git-wip-us.apache.org/repos/asf/hive/blob/2278548e/ql/src/test/queries/clientpositive/lineage3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
index c24ff7d..70d4e57 100644
--- a/ql/src/test/queries/clientpositive/lineage3.q
+++ b/ql/src/test/queries/clientpositive/lineage3.q
@@ -176,3 +176,29 @@ alter view dest_v3 as
 
 select * from dest_v3 limit 2;
 
+drop table if exists src_dp;
+create table src_dp (first string, word string, year int, month int, day int);
+drop table if exists dest_dp1;
+create table dest_dp1 (first string, word string) partitioned by (year int);
+drop table if exists dest_dp2;
+create table dest_dp2 (first string, word string) partitioned by (y int, m int);
+drop table if exists dest_dp3;
+create table dest_dp3 (first string, word string) partitioned by (y int, m int, d int);
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+insert into dest_dp1 partition (year) select first, word, year from src_dp;
+insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp;
+insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0;
+insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0;
+
+drop table if exists src_dp1;
+create table src_dp1 (f string, w string, m int);
+
+from src_dp, src_dp1
+insert into dest_dp1 partition (year) select first, word, year
+insert into dest_dp2 partition (y, m) select first, word, year, month
+insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2
+insert into dest_dp2 partition (y=1, m) select f, w, m
+insert into dest_dp1 partition (year=0) select f, w;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/2278548e/ql/src/test/results/clientpositive/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
index 708abee..ad965c8 100644
--- a/ql/src/test/results/clientpositive/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/lineage3.q.out
@@ -25,7 +25,7 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@alltypesorc
 PREHOOK: Output: default@d1
 PREHOOK: Output: default@d2
-{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.
 cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
+{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[5],"targets":[0],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[5],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint
 "},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
 PREHOOK: query: drop table if exists t
 PREHOOK: type: DROPTABLE
 PREHOOK: query: create table t as
@@ -320,3 +320,69 @@ PREHOOK: Input: default@dest_v3
 {"version":"1.0","engine":"mr","hash":"40bccc0722002f798d0548b59e369e83","queryText":"select * from dest_v3 limit 2","edges":[{"sources":[3,4,5,6,7],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) ctinyint) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) csmallint)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) csmallint)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[6],"targets":[1],"expression":"count(default.alltypesorc.cstring1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2],"expression":"(a.cboolean2 = true)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(a.cint = a.cint)","edgeType":"PREDICATE"},{"sources":[9],"targets":[0,1,2],"expression":"(a.cfloat > 0.0)","edgeType":"PREDICATE"},{"sources":[7],"targets":[0,1,2],"expression":"(count(default.alltypesorc.c
 int) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"dest_v3.a"},{"id":1,"vertexType":"COLUMN","vertexId":"dest_v3.x"},{"id":2,"vertexType":"COLUMN","vertexId":"dest_v3.cboolean1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"}]}
 38	216	false
 38	229	true
+PREHOOK: query: drop table if exists src_dp
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table src_dp (first string, word string, year int, month int, day int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_dp
+PREHOOK: query: drop table if exists dest_dp1
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table dest_dp1 (first string, word string) partitioned by (year int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_dp1
+PREHOOK: query: drop table if exists dest_dp2
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table dest_dp2 (first string, word string) partitioned by (y int, m int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_dp2
+PREHOOK: query: drop table if exists dest_dp3
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table dest_dp3 (first string, word string) partitioned by (y int, m int, d int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@dest_dp3
+PREHOOK: query: insert into dest_dp1 partition (year) select first, word, year from src_dp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dp
+PREHOOK: Output: default@dest_dp1
+{"version":"1.0","engine":"mr","hash":"b2d38401a3281e74a003d9650df97060","queryText":"insert into dest_dp1 partition (year) select first, word, year from src_dp","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+PREHOOK: query: insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dp
+PREHOOK: Output: default@dest_dp2
+{"version":"1.0","engine":"mr","hash":"237302d8ffd62b5b71d9544b22de7770","queryText":"insert into dest_dp2 partition (y, m) select first, word, year, month from src_dp","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src_dp.month"}]}
+PREHOOK: query: insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dp
+PREHOOK: Output: default@dest_dp2@y=0
+{"version":"1.0","engine":"mr","hash":"63e990b47e7ab4eb6f2ea09dfb7453ff","queryText":"insert into dest_dp2 partition (y=0, m) select first, word, month from src_dp where year=0","edges":[{"sources":[3],"targets":[0],"edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"edgeType":"PROJECTION"},{"sources":[5],"targets":[2],"edgeType":"PROJECTION"},{"sources":[6],"targets":[0,1,2],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+PREHOOK: query: insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dp
+PREHOOK: Output: default@dest_dp3@y=0
+{"version":"1.0","engine":"mr","hash":"6bf71a9d02c0612c63b6f40b15c1e8b3","queryText":"insert into dest_dp3 partition (y=0, m, d) select first, word, month m, day d from src_dp where year=0","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3],"expression":"(src_dp.year = 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src_dp.month"},{"id":7,"vertexType":"CO
 LUMN","vertexId":"default.src_dp.day"},{"id":8,"vertexType":"COLUMN","vertexId":"default.src_dp.year"}]}
+PREHOOK: query: drop table if exists src_dp1
+PREHOOK: type: DROPTABLE
+PREHOOK: query: create table src_dp1 (f string, w string, m int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_dp1
+Warning: Shuffle Join JOIN[4][tables = [src_dp, src_dp1]] in Stage 'Stage-5:MAPRED' is a cross product
+PREHOOK: query: from src_dp, src_dp1
+insert into dest_dp1 partition (year) select first, word, year
+insert into dest_dp2 partition (y, m) select first, word, year, month
+insert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2
+insert into dest_dp2 partition (y=1, m) select f, w, m
+insert into dest_dp1 partition (year=0) select f, w
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_dp
+PREHOOK: Input: default@src_dp1
+PREHOOK: Output: default@dest_dp1
+PREHOOK: Output: default@dest_dp1@year=0
+PREHOOK: Output: default@dest_dp2
+PREHOOK: Output: default@dest_dp2@y=1
+PREHOOK: Output: default@dest_dp3@y=2
+{"version":"1.0","engine":"mr","hash":"44f16edbf35cfeaf3d4f7b0113a69b74","queryText":"from src_dp, src_dp1\ninsert into dest_dp1 partition (year) select first, word, year\ninsert into dest_dp2 partition (y, m) select first, word, year, month\ninsert into dest_dp3 partition (y=2, m, d) select first, word, month m, day d where year=2\ninsert into dest_dp2 partition (y=1, m) select f, w, m\ninsert into dest_dp1 partition (year=0) select f, w","edges":[{"sources":[11],"targets":[0,1,2],"edgeType":"PROJECTION"},{"sources":[12],"targets":[3,4,5],"edgeType":"PROJECTION"},{"sources":[13],"targets":[6,7],"edgeType":"PROJECTION"},{"sources":[14],"targets":[8,9],"edgeType":"PROJECTION"},{"sources":[15],"targets":[1,0],"edgeType":"PROJECTION"},{"sources":[16],"targets":[4,3],"edgeType":"PROJECTION"},{"sources":[17],"targets":[8],"edgeType":"PROJECTION"},{"sources":[18],"targets":[10],"edgeType":"PROJECTION"},{"sources":[13],"targets":[2,5,9,10],"expression":"(src_dp.year = 2)","edgeType":"PREDI
 CATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_dp1.first"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_dp2.first"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_dp3.first"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_dp1.word"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_dp2.word"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_dp3.word"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_dp1.year"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_dp2.y"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_dp2.m"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_dp3.m"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_dp3.d"},{"id":11,"vertexType":"COLUMN","vertexId":"default.src_dp.first"},{"id":12,"vertexType":"COLUMN","vertexId":"default.src_dp.word"},{"id":13,"vertexType":"COLUMN","vertexId":"default.src_dp.year"},{"id":14,"vertexType":"COLUMN","vertexId":"default.src_dp.month
 "},{"id":15,"vertexType":"COLUMN","vertexId":"default.src_dp1.f"},{"id":16,"vertexType":"COLUMN","vertexId":"default.src_dp1.w"},{"id":17,"vertexType":"COLUMN","vertexId":"default.src_dp1.m"},{"id":18,"vertexType":"COLUMN","vertexId":"default.src_dp.day"}]}


[08/41] hive git commit: HIVE-11705 : refactor SARG stripe filtering for ORC into a separate method (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by se...@apache.org.
HIVE-11705 : refactor SARG stripe filtering for ORC into a separate method (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ba0b33c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ba0b33c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ba0b33c1

Branch: refs/heads/llap
Commit: ba0b33c1025625b92bd669da60d2789f315e27f7
Parents: bc62a46
Author: Sergey Shelukhin <se...@apache.org>
Authored: Tue Sep 15 18:09:54 2015 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Tue Sep 15 18:09:54 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 151 ++++++++++++-------
 .../apache/hadoop/hive/ql/io/orc/OrcSerde.java  |   1 +
 .../hadoop/hive/ql/io/orc/RecordReaderImpl.java |   4 +-
 .../hive/ql/io/parquet/ProjectionPusher.java    |   3 +-
 .../hive/ql/io/sarg/ConvertAstToSearchArg.java  |   4 +
 .../ql/optimizer/ColumnPrunerProcFactory.java   |   3 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   2 +
 .../hive/serde2/ColumnProjectionUtils.java      |  22 +++
 .../hive/ql/io/sarg/SearchArgumentFactory.java  |   5 +-
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |   7 +-
 10 files changed, 142 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index cf8694e..2500fb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -58,11 +58,13 @@ import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+import org.apache.hadoop.hive.ql.io.orc.OrcFile.WriterVersion;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -265,8 +267,7 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     OrcProto.Type root = types.get(rootColumn);
     for(int i=0; i < root.getSubtypesCount(); ++i) {
       if (included.contains(i)) {
-        includeColumnRecursive(types, result, root.getSubtypes(i),
-            rootColumn);
+        includeColumnRecursive(types, result, root.getSubtypes(i), rootColumn);
       }
     }
     return result;
@@ -292,6 +293,13 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
     int rootColumn = getRootColumn(isOriginal);
     String[] columnNames = new String[types.size() - rootColumn];
     int i = 0;
+    // The way this works is as follows: originalColumnNames is the equivalent of getNeededColumns
+    // from TSOP. They are assumed to be in the same order as the columns in the ORC file, AND they
+    // are assumed to be equivalent to the columns in includedColumns (because it was generated from
+    // the same column list at some point in the past), minus the subtype columns. Therefore, when
+    // we go through all the top-level ORC file columns that are included, in order, they match
+    // originalColumnNames. This way, we do not depend on names stored inside ORC for SARG leaf
+    // column name resolution (see mapSargColumns method).
     for(int columnId: types.get(rootColumn).getSubtypesList()) {
       if (includedColumns == null || includedColumns[columnId - rootColumn]) {
         // this is guaranteed to be positive because types only have children
@@ -306,8 +314,8 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
                                 List<OrcProto.Type> types,
                                 Configuration conf,
                                 boolean isOriginal) {
-    String columnNamesString = conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
-    if (columnNamesString == null) {
+    String neededColumnNames = getNeededColumnNamesString(conf);
+    if (neededColumnNames == null) {
       LOG.debug("No ORC pushdown predicate - no column names");
       options.searchArgument(null, null);
       return;
@@ -321,9 +329,39 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
 
     LOG.info("ORC pushdown predicate: " + sarg);
     options.searchArgument(sarg, getSargColumnNames(
-        columnNamesString.split(","), types, options.getInclude(), isOriginal));
+        neededColumnNames.split(","), types, options.getInclude(), isOriginal));
   }
 
+  static boolean canCreateSargFromConf(Configuration conf) {
+    if (getNeededColumnNamesString(conf) == null) {
+      LOG.debug("No ORC pushdown predicate - no column names");
+      return false;
+    }
+    if (!ConvertAstToSearchArg.canCreateFromConf(conf)) {
+      LOG.debug("No ORC pushdown predicate");
+      return false;
+    }
+    return true;
+  }
+
+  private static String[] extractNeededColNames(
+      List<OrcProto.Type> types, Configuration conf, boolean[] include, boolean isOriginal) {
+    return extractNeededColNames(types, getNeededColumnNamesString(conf), include, isOriginal);
+  }
+
+  private static String[] extractNeededColNames(
+      List<OrcProto.Type> types, String columnNamesString, boolean[] include, boolean isOriginal) {
+    return getSargColumnNames(columnNamesString.split(","), types, include, isOriginal);
+  }
+
+  private static String getNeededColumnNamesString(Configuration conf) {
+    return conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
+  }
+
+  private static String getSargColumnIDsString(Configuration conf) {
+    return conf.getBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, true) ? null
+        : conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
+  }
   @Override
   public boolean validateInput(FileSystem fs, HiveConf conf,
                                ArrayList<FileStatus> files
@@ -863,34 +901,11 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
 
       // we can't eliminate stripes if there are deltas because the
       // deltas may change the rows making them match the predicate.
-      if (deltas.isEmpty()) {
-        Reader.Options options = new Reader.Options();
-        options.include(includedCols);
-        setSearchArgument(options, types, context.conf, isOriginal);
-        // only do split pruning if HIVE-8732 has been fixed in the writer
-        if (options.getSearchArgument() != null &&
-            writerVersion != OrcFile.WriterVersion.ORIGINAL) {
-          SearchArgument sarg = options.getSearchArgument();
-          List<PredicateLeaf> sargLeaves = sarg.getLeaves();
-          List<StripeStatistics> stripeStats = metadata.getStripeStatistics();
-          int[] filterColumns = RecordReaderImpl.mapSargColumns(sargLeaves,
-              options.getColumnNames(), getRootColumn(isOriginal));
-
-          if (stripeStats != null) {
-            // eliminate stripes that don't satisfy the predicate condition
-            includeStripe = new boolean[stripes.size()];
-            for (int i = 0; i < stripes.size(); ++i) {
-              includeStripe[i] = (i >= stripeStats.size()) ||
-                  isStripeSatisfyPredicate(stripeStats.get(i), sarg,
-                      filterColumns);
-              if (isDebugEnabled && !includeStripe[i]) {
-                LOG.debug("Eliminating ORC stripe-" + i + " of file '" +
-                    file.getPath() + "'  as it did not satisfy " +
-                    "predicate condition.");
-              }
-            }
-          }
-        }
+      if (deltas.isEmpty() && canCreateSargFromConf(context.conf)) {
+        SearchArgument sarg = ConvertAstToSearchArg.createFromConf(context.conf);
+        String[] sargColNames = extractNeededColNames(types, context.conf, includedCols, isOriginal);
+        includeStripe = pickStripes(sarg, sargColNames, writerVersion, isOriginal,
+            metadata.getStripeStatistics(), stripes.size(), file.getPath());
       }
 
       // if we didn't have predicate pushdown, read everything
@@ -990,28 +1005,6 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
       }
       return orcReader.getRawDataSizeFromColIndices(internalColIds);
     }
-
-    private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics,
-                                             SearchArgument sarg,
-                                             int[] filterColumns) {
-      List<PredicateLeaf> predLeaves = sarg.getLeaves();
-      TruthValue[] truthValues = new TruthValue[predLeaves.size()];
-      for (int pred = 0; pred < truthValues.length; pred++) {
-        if (filterColumns[pred] != -1) {
-
-          // column statistics at index 0 contains only the number of rows
-          ColumnStatistics stats = stripeStatistics.getColumnStatistics()[filterColumns[pred]];
-          truthValues[pred] = RecordReaderImpl.evaluatePredicate(stats, predLeaves.get(pred), null);
-        } else {
-
-          // partition column case.
-          // partition filter will be evaluated by partition pruner so
-          // we will not evaluate partition filter here.
-          truthValues[pred] = TruthValue.YES_NO_NULL;
-        }
-      }
-      return sarg.evaluate(truthValues).isNeeded();
-    }
   }
 
   static List<OrcSplit> generateSplitsInfo(Configuration conf)
@@ -1353,6 +1346,54 @@ public class OrcInputFormat  implements InputFormat<NullWritable, OrcStruct>,
         directory);
   }
 
+  private static boolean[] pickStripes(SearchArgument sarg, String[] sargColNames,
+      WriterVersion writerVersion, boolean isOriginal, List<StripeStatistics> stripeStats,
+      int stripeCount, Path filePath) {
+    LOG.info("ORC pushdown predicate: " + sarg);
+    if (sarg == null || stripeStats == null || writerVersion == OrcFile.WriterVersion.ORIGINAL) {
+      return null; // only do split pruning if HIVE-8732 has been fixed in the writer
+    }
+    // eliminate stripes that don't satisfy the predicate condition
+    List<PredicateLeaf> sargLeaves = sarg.getLeaves();
+    int[] filterColumns = RecordReaderImpl.mapSargColumnsToOrcInternalColIdx(sargLeaves,
+        sargColNames, getRootColumn(isOriginal));
+    return pickStripesInternal(sarg, filterColumns, stripeStats, stripeCount, filePath);
+  }
+
+  private static boolean[] pickStripesInternal(SearchArgument sarg, int[] filterColumns,
+      List<StripeStatistics> stripeStats, int stripeCount, Path filePath) {
+    boolean[] includeStripe = new boolean[stripeCount];
+    for (int i = 0; i < includeStripe.length; ++i) {
+      includeStripe[i] = (i >= stripeStats.size()) ||
+          isStripeSatisfyPredicate(stripeStats.get(i), sarg, filterColumns);
+      if (isDebugEnabled && !includeStripe[i]) {
+        LOG.debug("Eliminating ORC stripe-" + i + " of file '" + filePath
+            + "'  as it did not satisfy predicate condition.");
+      }
+    }
+    return includeStripe;
+  }
+
+  private static boolean isStripeSatisfyPredicate(
+      StripeStatistics stripeStatistics, SearchArgument sarg, int[] filterColumns) {
+    List<PredicateLeaf> predLeaves = sarg.getLeaves();
+    TruthValue[] truthValues = new TruthValue[predLeaves.size()];
+    for (int pred = 0; pred < truthValues.length; pred++) {
+      if (filterColumns[pred] != -1) {
+
+        // column statistics at index 0 contains only the number of rows
+        ColumnStatistics stats = stripeStatistics.getColumnStatistics()[filterColumns[pred]];
+        truthValues[pred] = RecordReaderImpl.evaluatePredicate(stats, predLeaves.get(pred), null);
+      } else {
+
+        // partition column case.
+        // partition filter will be evaluated by partition pruner so
+        // we will not evaluate partition filter here.
+        truthValues[pred] = TruthValue.YES_NO_NULL;
+      }
+    }
+    return sarg.evaluate(truthValues).isNeeded();
+  }
 
   @VisibleForTesting
   static SplitStrategy determineSplitStrategy(Context context, FileSystem fs, Path dir,
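
For readers of this hunk: below is a minimal, self-contained sketch of the stripe-pruning pattern that pickStripes/isStripeSatisfyPredicate above factor out. It is plain Java for illustration only -- Truth, Leaf and the min/max statistics are made up, and the real code combines leaf results through SearchArgument.evaluate() rather than the simple AND used here.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class StripePruneSketch {
  enum Truth { YES, NO, MAYBE }                            // stand-in for TruthValue

  interface Leaf { Truth evaluate(long min, long max); }   // stand-in for a predicate leaf

  // One {min, max} pair per stripe for the single filtered column (hypothetical data).
  static boolean[] pickStripes(List<Leaf> leaves, long[][] stripeMinMax) {
    boolean[] include = new boolean[stripeMinMax.length];
    for (int i = 0; i < include.length; i++) {
      boolean needed = true;
      for (Leaf leaf : leaves) {
        // A stripe can be skipped only when some leaf is definitely NO for its statistics.
        if (leaf.evaluate(stripeMinMax[i][0], stripeMinMax[i][1]) == Truth.NO) {
          needed = false;
          break;
        }
      }
      include[i] = needed;
    }
    return include;
  }

  public static void main(String[] args) {
    // "column > 100": NO if the stripe max is <= 100, YES if even the min exceeds 100.
    Leaf gt100 = (min, max) -> max > 100 ? (min > 100 ? Truth.YES : Truth.MAYBE) : Truth.NO;
    long[][] stats = { {0, 50}, {90, 200}, {150, 300} };
    System.out.println(Arrays.toString(pickStripes(Collections.singletonList(gt100), stats)));
    // prints [false, true, true]: the first stripe is eliminated, the others may match.
  }
}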

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
index 8beff4b..595f3b3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
@@ -108,6 +108,7 @@ public class OrcSerde implements SerDe, VectorizedSerde {
     ArrayList<TypeInfo> fieldTypes =
         TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
     StructTypeInfo rootType = new StructTypeInfo();
+    // The source column names for ORC serde that will be used in the schema.
     rootType.setAllStructFieldNames(columnNames);
     rootType.setAllStructFieldTypeInfos(fieldTypes);
     inspector = OrcStruct.createObjectInspector(rootType);

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index fcb3746..ba304ba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -137,7 +137,7 @@ class RecordReaderImpl implements RecordReader {
    *                   result
    * @return an array mapping the sarg leaves to concrete column numbers
    */
-  public static int[] mapSargColumns(List<PredicateLeaf> sargLeaves,
+  public static int[] mapSargColumnsToOrcInternalColIdx(List<PredicateLeaf> sargLeaves,
                              String[] columnNames,
                              int rootColumn) {
     int[] result = new int[sargLeaves.size()];
@@ -693,7 +693,7 @@ class RecordReaderImpl implements RecordReader {
         List<OrcProto.Type> types, int includedCount) {
       this.sarg = sarg;
       sargLeaves = sarg.getLeaves();
-      filterColumns = mapSargColumns(sargLeaves, columnNames, 0);
+      filterColumns = mapSargColumnsToOrcInternalColIdx(sargLeaves, columnNames, 0);
       this.rowIndexStride = rowIndexStride;
       // included will not be null, row options will fill the array with trues if null
       sargColumns = new boolean[includedCount];

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
index 4480600..4848efd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java
@@ -65,6 +65,7 @@ public class ProjectionPusher {
     }
   }
 
+  @Deprecated  // Uses deprecated methods on ColumnProjectionUtils
   private void pushProjectionsAndFilters(final JobConf jobConf,
       final String splitPath, final String splitPathWithNoSchema) {
 
@@ -136,7 +137,7 @@ public class ProjectionPusher {
         filterExprSerialized);
   }
 
-
+  @Deprecated // Uses deprecated methods on ColumnProjectionUtils
   public JobConf pushProjectionsAndFilters(JobConf jobConf, Path path)
       throws IOException {
     updateMrWork(jobConf);  // TODO: refactor this in HIVE-6366

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
index e034650..690b8c9 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
@@ -433,4 +433,8 @@ public class ConvertAstToSearchArg {
     return null;
   }
 
+  public static boolean canCreateFromConf(Configuration conf) {
+    return conf.get(TableScanDesc.FILTER_EXPR_CONF_STR) != null || conf.get(SARG_PUSHDOWN) != null;
+  }
+
 }
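
A hedged caller-side sketch of the new helper (the surrounding setup, including the 'job' object, is hypothetical and not part of this patch): callers such as OrcInputFormat above can now check cheaply whether a pushdown predicate is present at all before paying for deserialization.

// Hypothetical usage sketch, mirroring the guard used in OrcInputFormat above.
Configuration conf = job.getConfiguration();   // 'job' is assumed to exist in the caller
SearchArgument sarg = null;
if (ConvertAstToSearchArg.canCreateFromConf(conf)) {
  sarg = ConvertAstToSearchArg.createFromConf(conf);
}
// sarg == null means no pushdown predicate was supplied, so no stripe pruning is attempted.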

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
index 2dc15f9..b104a7d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
@@ -488,6 +488,9 @@ public final class ColumnPrunerProcFactory {
     }
   }
 
+  /** Sets up needed columns for TSOP. Mainly, transfers column names from input
+   * RowSchema as well as the needed virtual columns, into TableScanDesc.
+   */
   public static void setupNeededColumns(TableScanOperator scanOp, RowSchema inputRS,
       List<String> cols) throws SemanticException {
     List<Integer> neededColumnIds = new ArrayList<Integer>();

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 16957b6..1076dfd 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -9324,6 +9324,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
 
     if (top == null) {
+      // Determine row schema for TSOP.
+      // Include column names from SerDe, the partition and virtual columns.
       rwsch = new RowResolver();
       try {
         StructObjectInspector rowObjectInspector = (StructObjectInspector) tab

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
index 10086c5..cbad3b2 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/ColumnProjectionUtils.java
@@ -22,9 +22,12 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
 
@@ -33,6 +36,7 @@ import com.google.common.collect.Lists;
  *
  */
 public final class ColumnProjectionUtils {
+  public static final Log LOG = LogFactory.getLog(ColumnProjectionUtils.class);
 
   public static final String READ_COLUMN_IDS_CONF_STR = "hive.io.file.readcolumn.ids";
   public static final String READ_ALL_COLUMNS = "hive.io.file.read.all.columns";
@@ -54,6 +58,7 @@ public final class ColumnProjectionUtils {
    * and appendReadColumns
    */
   @Deprecated
+  @VisibleForTesting
   public static void setReadColumnIDs(Configuration conf, List<Integer> ids) {
     setReadColumnIDConf(conf, READ_COLUMN_IDS_CONF_STR_DEFAULT);
     appendReadColumns(conf, ids);
@@ -102,8 +107,21 @@ public final class ColumnProjectionUtils {
     conf.setBoolean(READ_ALL_COLUMNS, false);
   }
 
+  /**
+   * This method appends read column information to configuration to use for PPD. It is
+   * currently called with information from TSOP. Names come from TSOP input RowSchema, and
+   * IDs are the indexes inside the schema (which PPD assumes correspond to indexes inside the
+   * files that PPD reads; an assumption that breaks in many cases of schema evolution).
+   * @param conf Config to set values to.
+   * @param ids Column ids.
+   * @param names Column names.
+   */
   public static void appendReadColumns(
       Configuration conf, List<Integer> ids, List<String> names) {
+    if (ids.size() != names.size()) {
+      LOG.warn("Read column counts do not match: "
+          + ids.size() + " ids, " + names.size() + " names");
+    }
     appendReadColumns(conf, ids);
     appendReadColumnNames(conf, names);
   }
@@ -125,9 +143,13 @@ public final class ColumnProjectionUtils {
     List<Integer> result = new ArrayList<Integer>(list.length);
     for (String element : list) {
       // it may contain duplicates, remove duplicates
+      // TODO: WTF? This would break many assumptions elsewhere if it did.
+      //       Column names' and column ids' lists are supposed to be correlated.
       Integer toAdd = Integer.parseInt(element);
       if (!result.contains(toAdd)) {
         result.add(toAdd);
+      } else if (LOG.isInfoEnabled()) {
+        LOG.info("Duplicate ID " + toAdd + " in column ID list");
       }
     }
     return result;
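
The TODO above questions the duplicate-dropping behaviour because the id and name lists are written to the conf in parallel. A minimal, self-contained sketch of the correlation problem the new log lines are meant to surface (plain Java with made-up data, not the Hive code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class ReadColumnListSketch {
  public static void main(String[] args) {
    // Ids and names are appended in the same order, so index i of one list
    // is supposed to describe index i of the other.
    List<Integer> ids = Arrays.asList(0, 2, 2, 5);
    List<String> names = Arrays.asList("a", "c", "c", "f");

    // Dropping duplicate ids on read (as the loop above does) leaves 3 entries,
    // while the name list still has 4, so index-based pairing no longer lines up.
    List<Integer> dedupedIds = new ArrayList<>(new LinkedHashSet<>(ids));
    System.out.println(dedupedIds + " vs " + names);   // [0, 2, 5] vs [a, c, c, f]
  }
}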

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
index 0778935..8fda95c 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java
@@ -19,10 +19,13 @@
 package org.apache.hadoop.hive.ql.io.sarg;
 
 /**
- * A factory for creating SearchArguments.
+ * A factory for creating SearchArguments, as well as modifying those created by this factory.
  */
 public class SearchArgumentFactory {
   public static SearchArgument.Builder newBuilder() {
     return new SearchArgumentImpl.BuilderImpl();
   }
+  public static void setPredicateLeafColumn(PredicateLeaf leaf, String newName) {
+    SearchArgumentImpl.PredicateLeafImpl.setColumnName(leaf, newName);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/ba0b33c1/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
index d27ac16..a762b8b 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
@@ -40,7 +40,7 @@ final class SearchArgumentImpl implements SearchArgument {
   static final class PredicateLeafImpl implements PredicateLeaf {
     private final Operator operator;
     private final Type type;
-    private final String columnName;
+    private String columnName;
     private final Object literal;
     private final List<Object> literalList;
 
@@ -165,6 +165,11 @@ final class SearchArgumentImpl implements SearchArgument {
              (literalList == null ? 0 : literalList.hashCode()) *
                  103 * 101 * 3 * 17;
     }
+
+    public static void setColumnName(PredicateLeaf leaf, String newName) {
+      assert leaf instanceof PredicateLeafImpl;
+      ((PredicateLeafImpl)leaf).columnName = newName;
+    }
   }
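
The setColumnName/setPredicateLeafColumn pair above lets a reader rewrite the column a predicate leaf refers to (for example, translating an external schema name into a file-internal name) without making PredicateLeafImpl publicly mutable. A minimal, self-contained sketch of that factory-gated mutation pattern (plain Java; Leaf, LeafImpl and Leaves are made up for illustration, not the Hive types):

public class LeafRenameSketch {
  // Public read-only view of a predicate leaf.
  interface Leaf { String columnName(); }

  // The implementation keeps the field mutable...
  static final class LeafImpl implements Leaf {
    private String columnName;
    LeafImpl(String columnName) { this.columnName = columnName; }
    public String columnName() { return columnName; }
  }

  // ...and only the factory exposes a narrow way to rewrite it.
  static final class Leaves {
    static Leaf create(String columnName) { return new LeafImpl(columnName); }
    static void setColumnName(Leaf leaf, String newName) {
      assert leaf instanceof LeafImpl;
      ((LeafImpl) leaf).columnName = newName;
    }
  }

  public static void main(String[] args) {
    Leaf leaf = Leaves.create("external_name");
    Leaves.setColumnName(leaf, "_col3");        // e.g. map to the file-internal name
    System.out.println(leaf.columnName());      // _col3
  }
}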
 
 


[17/41] hive git commit: HIVE-11678 : Add AggregateProjectMergeRule (Ashutosh Chauhan via Jesus Camacho Rodriguez)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1cce5f00/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 36f1099..7d9d99e 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -398,28 +398,28 @@ Stage-0
             compressed:false
             Statistics:Num rows: 10 Data size: 885 Basic stats: COMPLETE Column stats: COMPLETE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Select Operator [SEL_6]
+            Select Operator [SEL_5]
                outputColumnNames:["_col0","_col1","_col2"]
                Statistics:Num rows: 10 Data size: 885 Basic stats: COMPLETE Column stats: COMPLETE
-               Group By Operator [GBY_5]
+               Group By Operator [GBY_4]
                |  aggregations:["sum(VALUE._col0)"]
-               |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
+               |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
                |  outputColumnNames:["_col0","_col1","_col2","_col3"]
                |  Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                |<-Map 1 [SIMPLE_EDGE]
-                  Reduce Output Operator [RS_4]
-                     key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                     Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
+                  Reduce Output Operator [RS_3]
+                     key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                     Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
                      sort order:+++
                      Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions:_col3 (type: bigint)
-                     Group By Operator [GBY_3]
-                        aggregations:["sum(_col1)"]
-                        keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
+                     Group By Operator [GBY_2]
+                        aggregations:["sum(c_int)"]
+                        keys:key (type: string), c_int (type: int), c_float (type: float)
                         outputColumnNames:["_col0","_col1","_col2","_col3"]
                         Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                         Select Operator [SEL_1]
-                           outputColumnNames:["_col0","_col1","_col2"]
+                           outputColumnNames:["key","c_int","c_float"]
                            Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
                            TableScan [TS_0]
                               alias:cbo_t1
@@ -464,28 +464,28 @@ Stage-0
                         keys:_col0 (type: bigint), _col1 (type: float)
                         outputColumnNames:["_col0","_col1","_col2"]
                         Statistics:Num rows: 5 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
-                        Select Operator [SEL_6]
+                        Select Operator [SEL_5]
                            outputColumnNames:["_col0","_col1"]
                            Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
-                           Group By Operator [GBY_5]
+                           Group By Operator [GBY_4]
                            |  aggregations:["sum(VALUE._col0)"]
-                           |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
+                           |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
                            |  outputColumnNames:["_col0","_col1","_col2","_col3"]
                            |  Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                            |<-Map 1 [SIMPLE_EDGE]
-                              Reduce Output Operator [RS_4]
-                                 key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                 Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
+                              Reduce Output Operator [RS_3]
+                                 key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                 Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
                                  sort order:+++
                                  Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                                  value expressions:_col3 (type: bigint)
-                                 Group By Operator [GBY_3]
-                                    aggregations:["sum(_col1)"]
-                                    keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
+                                 Group By Operator [GBY_2]
+                                    aggregations:["sum(c_int)"]
+                                    keys:key (type: string), c_int (type: int), c_float (type: float)
                                     outputColumnNames:["_col0","_col1","_col2","_col3"]
                                     Statistics:Num rows: 10 Data size: 917 Basic stats: COMPLETE Column stats: COMPLETE
                                     Select Operator [SEL_1]
-                                       outputColumnNames:["_col0","_col1","_col2"]
+                                       outputColumnNames:["key","c_int","c_float"]
                                        Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
                                        TableScan [TS_0]
                                           alias:cbo_t1
@@ -525,152 +525,149 @@ Stage-0
                   sort order:-+
                   Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions:_col0 (type: int), _col2 (type: bigint)
-                  Group By Operator [GBY_41]
-                  |  aggregations:["count(VALUE._col0)"]
-                  |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
-                  |  outputColumnNames:["_col0","_col1","_col2"]
-                  |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                  |<-Reducer 5 [SIMPLE_EDGE]
-                     Reduce Output Operator [RS_40]
-                        key expressions:_col0 (type: int), _col1 (type: bigint)
-                        Map-reduce partition columns:_col0 (type: int), _col1 (type: bigint)
-                        sort order:++
-                        Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions:_col2 (type: bigint)
-                        Group By Operator [GBY_39]
-                           aggregations:["count()"]
-                           keys:_col0 (type: int), _col1 (type: bigint)
-                           outputColumnNames:["_col0","_col1","_col2"]
+                  Select Operator [SEL_41]
+                     outputColumnNames:["_col0","_col1","_col2"]
+                     Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     Group By Operator [GBY_40]
+                     |  aggregations:["count(VALUE._col0)"]
+                     |  keys:KEY._col0 (type: bigint), KEY._col1 (type: int)
+                     |  outputColumnNames:["_col0","_col1","_col2"]
+                     |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     |<-Reducer 5 [SIMPLE_EDGE]
+                        Reduce Output Operator [RS_39]
+                           key expressions:_col0 (type: bigint), _col1 (type: int)
+                           Map-reduce partition columns:_col0 (type: bigint), _col1 (type: int)
+                           sort order:++
                            Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                           Select Operator [SEL_37]
-                              outputColumnNames:["_col0","_col1"]
-                              Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                              Filter Operator [FIL_36]
-                                 predicate:((_col1 > 0) or (_col6 >= 0)) (type: boolean)
+                           value expressions:_col2 (type: bigint)
+                           Group By Operator [GBY_38]
+                              aggregations:["count()"]
+                              keys:_col2 (type: bigint), _col6 (type: int)
+                              outputColumnNames:["_col0","_col1","_col2"]
+                              Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                              Select Operator [SEL_37]
+                                 outputColumnNames:["_col2","_col6"]
                                  Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
-                                 Merge Join Operator [MERGEJOIN_55]
-                                 |  condition map:[{"":"Inner Join 0 to 1"}]
-                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                 |  outputColumnNames:["_col1","_col2","_col6"]
-                                 |  Statistics:Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |<-Map 11 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_34]
-                                 |     key expressions:_col0 (type: string)
-                                 |     Map-reduce partition columns:_col0 (type: string)
-                                 |     sort order:+
-                                 |     Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |     value expressions:_col1 (type: int)
-                                 |     Select Operator [SEL_30]
-                                 |        outputColumnNames:["_col0","_col1"]
-                                 |        Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |        Filter Operator [FIL_53]
-                                 |           predicate:key is not null (type: boolean)
-                                 |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |           TableScan [TS_29]
-                                 |              alias:cbo_t3
-                                 |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |<-Reducer 4 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_32]
-                                       key expressions:_col0 (type: string)
-                                       Map-reduce partition columns:_col0 (type: string)
-                                       sort order:+
-                                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                       value expressions:_col1 (type: int), _col2 (type: bigint)
-                                       Select Operator [SEL_28]
-                                          outputColumnNames:["_col0","_col1","_col2"]
+                                 Filter Operator [FIL_36]
+                                    predicate:((_col1 > 0) or (_col6 >= 0)) (type: boolean)
+                                    Statistics:Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Merge Join Operator [MERGEJOIN_55]
+                                    |  condition map:[{"":"Inner Join 0 to 1"}]
+                                    |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                    |  outputColumnNames:["_col1","_col2","_col6"]
+                                    |  Statistics:Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |<-Map 11 [SIMPLE_EDGE]
+                                    |  Reduce Output Operator [RS_34]
+                                    |     key expressions:_col0 (type: string)
+                                    |     Map-reduce partition columns:_col0 (type: string)
+                                    |     sort order:+
+                                    |     Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |     value expressions:_col1 (type: int)
+                                    |     Select Operator [SEL_30]
+                                    |        outputColumnNames:["_col0","_col1"]
+                                    |        Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |        Filter Operator [FIL_53]
+                                    |           predicate:key is not null (type: boolean)
+                                    |           Statistics:Num rows: 18 Data size: 1424 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |           TableScan [TS_29]
+                                    |              alias:cbo_t3
+                                    |              Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |<-Reducer 4 [SIMPLE_EDGE]
+                                       Reduce Output Operator [RS_32]
+                                          key expressions:_col0 (type: string)
+                                          Map-reduce partition columns:_col0 (type: string)
+                                          sort order:+
                                           Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                          Filter Operator [FIL_27]
-                                             predicate:((_col1 + _col4) >= 0) (type: boolean)
+                                          value expressions:_col1 (type: int), _col2 (type: bigint)
+                                          Select Operator [SEL_28]
+                                             outputColumnNames:["_col0","_col1","_col2"]
                                              Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             Merge Join Operator [MERGEJOIN_54]
-                                             |  condition map:[{"":"Inner Join 0 to 1"}]
-                                             |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                             |  outputColumnNames:["_col0","_col1","_col2","_col4"]
-                                             |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |<-Reducer 10 [SIMPLE_EDGE]
-                                             |  Reduce Output Operator [RS_25]
-                                             |     key expressions:_col0 (type: string)
-                                             |     Map-reduce partition columns:_col0 (type: string)
-                                             |     sort order:+
-                                             |     Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |     value expressions:_col1 (type: int)
-                                             |     Select Operator [SEL_20]
-                                             |     |  outputColumnNames:["_col0","_col1"]
-                                             |     |  Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |     |<-Reducer 9 [SIMPLE_EDGE]
-                                             |        Reduce Output Operator [RS_19]
-                                             |           key expressions:_col3 (type: double), _col2 (type: bigint)
-                                             |           sort order:-+
-                                             |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |           value expressions:_col0 (type: string), _col1 (type: int)
-                                             |           Select Operator [SEL_18]
-                                             |              outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |              Group By Operator [GBY_17]
-                                             |              |  aggregations:["sum(VALUE._col0)"]
-                                             |              |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                                             |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |              |<-Map 8 [SIMPLE_EDGE]
-                                             |                 Reduce Output Operator [RS_16]
-                                             |                    key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                    Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                    sort order:+++
-                                             |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                    value expressions:_col3 (type: bigint)
-                                             |                    Group By Operator [GBY_15]
-                                             |                       aggregations:["sum(_col1)"]
-                                             |                       keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                       Select Operator [SEL_13]
-                                             |                          outputColumnNames:["_col0","_col1","_col2"]
-                                             |                          Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                          Filter Operator [FIL_52]
-                                             |                             predicate:((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and key is not null) (type: boolean)
-                                             |                             Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                             TableScan [TS_11]
-                                             |                                alias:cbo_t2
-                                             |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |<-Reducer 3 [SIMPLE_EDGE]
-                                                Reduce Output Operator [RS_23]
-                                                   key expressions:_col0 (type: string)
-                                                   Map-reduce partition columns:_col0 (type: string)
-                                                   sort order:+
-                                                   Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                                   value expressions:_col1 (type: int), _col2 (type: bigint)
-                                                   Select Operator [SEL_9]
-                                                   |  outputColumnNames:["_col0","_col1","_col2"]
-                                                   |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                                   |<-Reducer 2 [SIMPLE_EDGE]
-                                                      Reduce Output Operator [RS_8]
-                                                         key expressions:_col0 (type: string)
-                                                         sort order:+
-                                                         Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                                         value expressions:_col1 (type: int), _col2 (type: bigint)
-                                                         Select Operator [SEL_7]
-                                                            outputColumnNames:["_col0","_col1","_col2"]
+                                             Filter Operator [FIL_27]
+                                                predicate:((_col1 + _col4) >= 0) (type: boolean)
+                                                Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Merge Join Operator [MERGEJOIN_54]
+                                                |  condition map:[{"":"Inner Join 0 to 1"}]
+                                                |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                                |  outputColumnNames:["_col0","_col1","_col2","_col4"]
+                                                |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |<-Reducer 10 [SIMPLE_EDGE]
+                                                |  Reduce Output Operator [RS_25]
+                                                |     key expressions:_col0 (type: string)
+                                                |     Map-reduce partition columns:_col0 (type: string)
+                                                |     sort order:+
+                                                |     Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |     value expressions:_col1 (type: int)
+                                                |     Select Operator [SEL_20]
+                                                |     |  outputColumnNames:["_col0","_col1"]
+                                                |     |  Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |     |<-Reducer 9 [SIMPLE_EDGE]
+                                                |        Reduce Output Operator [RS_19]
+                                                |           key expressions:_col3 (type: double), _col2 (type: bigint)
+                                                |           sort order:-+
+                                                |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |           value expressions:_col0 (type: string), _col1 (type: int)
+                                                |           Select Operator [SEL_17]
+                                                |              outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |              Group By Operator [GBY_16]
+                                                |              |  aggregations:["sum(VALUE._col0)"]
+                                                |              |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                                                |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |              |<-Map 8 [SIMPLE_EDGE]
+                                                |                 Reduce Output Operator [RS_15]
+                                                |                    key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                |                    Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                |                    sort order:+++
+                                                |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                    value expressions:_col3 (type: bigint)
+                                                |                    Group By Operator [GBY_14]
+                                                |                       aggregations:["sum(c_int)"]
+                                                |                       keys:key (type: string), c_int (type: int), c_float (type: float)
+                                                |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                       Filter Operator [FIL_52]
+                                                |                          predicate:((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and key is not null) (type: boolean)
+                                                |                          Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                          TableScan [TS_11]
+                                                |                             alias:cbo_t2
+                                                |                             Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |<-Reducer 3 [SIMPLE_EDGE]
+                                                   Reduce Output Operator [RS_23]
+                                                      key expressions:_col0 (type: string)
+                                                      Map-reduce partition columns:_col0 (type: string)
+                                                      sort order:+
+                                                      Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      value expressions:_col1 (type: int), _col2 (type: bigint)
+                                                      Select Operator [SEL_9]
+                                                      |  outputColumnNames:["_col0","_col1","_col2"]
+                                                      |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      |<-Reducer 2 [SIMPLE_EDGE]
+                                                         Reduce Output Operator [RS_8]
+                                                            key expressions:_col0 (type: string)
+                                                            sort order:+
                                                             Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                                            Group By Operator [GBY_6]
-                                                            |  aggregations:["sum(VALUE._col0)"]
-                                                            |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                                                            |  outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                                            |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                                            |<-Map 1 [SIMPLE_EDGE]
-                                                               Reduce Output Operator [RS_5]
-                                                                  key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                                  Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                                  sort order:+++
-                                                                  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                                                  value expressions:_col3 (type: bigint)
-                                                                  Group By Operator [GBY_4]
-                                                                     aggregations:["sum(_col1)"]
-                                                                     keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                                     outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                            value expressions:_col1 (type: int), _col2 (type: bigint)
+                                                            Select Operator [SEL_6]
+                                                               outputColumnNames:["_col0","_col1","_col2"]
+                                                               Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                                                               Group By Operator [GBY_5]
+                                                               |  aggregations:["sum(VALUE._col0)"]
+                                                               |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                                                               |  outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                               |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                               |<-Map 1 [SIMPLE_EDGE]
+                                                                  Reduce Output Operator [RS_4]
+                                                                     key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                                     Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                                     sort order:+++
                                                                      Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                                                     Select Operator [SEL_2]
-                                                                        outputColumnNames:["_col0","_col1","_col2"]
-                                                                        Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                     value expressions:_col3 (type: bigint)
+                                                                     Group By Operator [GBY_3]
+                                                                        aggregations:["sum(c_int)"]
+                                                                        keys:key (type: string), c_int (type: int), c_float (type: float)
+                                                                        outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                                        Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
                                                                         Filter Operator [FIL_51]
                                                                            predicate:((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and key is not null) (type: boolean)
                                                                            Statistics:Num rows: 3 Data size: 279 Basic stats: COMPLETE Column stats: COMPLETE
@@ -711,143 +708,146 @@ Stage-0
                   sort order:+-
                   Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions:_col1 (type: bigint), _col2 (type: bigint)
-                  Group By Operator [GBY_40]
-                  |  aggregations:["count(VALUE._col0)"]
-                  |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
-                  |  outputColumnNames:["_col0","_col1","_col2"]
-                  |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                  |<-Reducer 5 [SIMPLE_EDGE]
-                     Reduce Output Operator [RS_39]
-                        key expressions:_col0 (type: int), _col1 (type: bigint)
-                        Map-reduce partition columns:_col0 (type: int), _col1 (type: bigint)
-                        sort order:++
-                        Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions:_col2 (type: bigint)
-                        Group By Operator [GBY_38]
-                           aggregations:["count()"]
-                           keys:_col0 (type: int), _col1 (type: bigint)
-                           outputColumnNames:["_col0","_col1","_col2"]
+                  Select Operator [SEL_40]
+                     outputColumnNames:["_col0","_col1","_col2"]
+                     Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     Group By Operator [GBY_39]
+                     |  aggregations:["count(VALUE._col0)"]
+                     |  keys:KEY._col0 (type: bigint), KEY._col1 (type: int)
+                     |  outputColumnNames:["_col0","_col1","_col2"]
+                     |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     |<-Reducer 5 [SIMPLE_EDGE]
+                        Reduce Output Operator [RS_38]
+                           key expressions:_col0 (type: bigint), _col1 (type: int)
+                           Map-reduce partition columns:_col0 (type: bigint), _col1 (type: int)
+                           sort order:++
                            Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                           Select Operator [SEL_34]
-                              outputColumnNames:["_col0","_col1"]
-                              Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-                              Filter Operator [FIL_48]
-                                 predicate:((((_col6 > 0) and ((_col6 >= 1) or (_col2 >= 1))) and ((UDFToLong(_col6) + _col2) >= 0)) and ((_col1 > 0) or (_col6 >= 0))) (type: boolean)
+                           value expressions:_col2 (type: bigint)
+                           Group By Operator [GBY_37]
+                              aggregations:["count()"]
+                              keys:_col2 (type: bigint), _col6 (type: int)
+                              outputColumnNames:["_col0","_col1","_col2"]
+                              Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                              Select Operator [SEL_34]
+                                 outputColumnNames:["_col2","_col6"]
                                  Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
-                                 Merge Join Operator [MERGEJOIN_53]
-                                 |  condition map:[{"":"Left Outer Join0 to 1"}]
-                                 |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                 |  outputColumnNames:["_col1","_col2","_col6"]
-                                 |  Statistics:Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |<-Map 10 [SIMPLE_EDGE]
-                                 |  Reduce Output Operator [RS_31]
-                                 |     key expressions:_col0 (type: string)
-                                 |     Map-reduce partition columns:_col0 (type: string)
-                                 |     sort order:+
-                                 |     Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |     value expressions:_col1 (type: int)
-                                 |     Select Operator [SEL_29]
-                                 |        outputColumnNames:["_col0","_col1"]
-                                 |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |        TableScan [TS_28]
-                                 |           alias:cbo_t3
-                                 |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                                 |<-Reducer 4 [SIMPLE_EDGE]
-                                    Reduce Output Operator [RS_30]
-                                       key expressions:_col0 (type: string)
-                                       Map-reduce partition columns:_col0 (type: string)
-                                       sort order:+
-                                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                       value expressions:_col1 (type: int), _col2 (type: bigint)
-                                       Select Operator [SEL_27]
-                                          outputColumnNames:["_col0","_col1","_col2"]
+                                 Filter Operator [FIL_48]
+                                    predicate:((((_col6 > 0) and ((_col6 >= 1) or (_col2 >= 1))) and ((UDFToLong(_col6) + _col2) >= 0)) and ((_col1 > 0) or (_col6 >= 0))) (type: boolean)
+                                    Statistics:Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                                    Merge Join Operator [MERGEJOIN_53]
+                                    |  condition map:[{"":"Left Outer Join0 to 1"}]
+                                    |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                    |  outputColumnNames:["_col1","_col2","_col6"]
+                                    |  Statistics:Num rows: 4 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |<-Map 10 [SIMPLE_EDGE]
+                                    |  Reduce Output Operator [RS_31]
+                                    |     key expressions:_col0 (type: string)
+                                    |     Map-reduce partition columns:_col0 (type: string)
+                                    |     sort order:+
+                                    |     Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |     value expressions:_col1 (type: int)
+                                    |     Select Operator [SEL_29]
+                                    |        outputColumnNames:["_col0","_col1"]
+                                    |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |        TableScan [TS_28]
+                                    |           alias:cbo_t3
+                                    |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                    |<-Reducer 4 [SIMPLE_EDGE]
+                                       Reduce Output Operator [RS_30]
+                                          key expressions:_col0 (type: string)
+                                          Map-reduce partition columns:_col0 (type: string)
+                                          sort order:+
                                           Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                          Filter Operator [FIL_26]
-                                             predicate:((_col1 + _col4) >= 0) (type: boolean)
+                                          value expressions:_col1 (type: int), _col2 (type: bigint)
+                                          Select Operator [SEL_27]
+                                             outputColumnNames:["_col0","_col1","_col2"]
                                              Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             Merge Join Operator [MERGEJOIN_52]
-                                             |  condition map:[{"":"Left Outer Join0 to 1"}]
-                                             |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
-                                             |  outputColumnNames:["_col0","_col1","_col2","_col4"]
-                                             |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |<-Reducer 3 [SIMPLE_EDGE]
-                                             |  Reduce Output Operator [RS_23]
-                                             |     key expressions:_col0 (type: string)
-                                             |     Map-reduce partition columns:_col0 (type: string)
-                                             |     sort order:+
-                                             |     Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |     value expressions:_col1 (type: int), _col2 (type: bigint)
-                                             |     Select Operator [SEL_11]
-                                             |     |  outputColumnNames:["_col0","_col1","_col2"]
-                                             |     |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |     |<-Reducer 2 [SIMPLE_EDGE]
-                                             |        Reduce Output Operator [RS_10]
-                                             |           key expressions:_col3 (type: bigint), _col1 (type: int)
-                                             |           sort order:+-
-                                             |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |           value expressions:_col0 (type: string), _col2 (type: bigint)
-                                             |           Select Operator [SEL_9]
-                                             |              outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |              Group By Operator [GBY_8]
-                                             |              |  aggregations:["sum(VALUE._col0)"]
-                                             |              |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                                             |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |              |<-Map 1 [SIMPLE_EDGE]
-                                             |                 Reduce Output Operator [RS_7]
-                                             |                    key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                    Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                    sort order:+++
-                                             |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                    value expressions:_col3 (type: bigint)
-                                             |                    Group By Operator [GBY_6]
-                                             |                       aggregations:["sum(_col1)"]
-                                             |                       keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
-                                             |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                       Select Operator [SEL_2]
-                                             |                          outputColumnNames:["_col0","_col1","_col2"]
-                                             |                          Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                          Filter Operator [FIL_49]
-                                             |                             predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
-                                             |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |                             TableScan [TS_0]
-                                             |                                alias:cbo_t1
-                                             |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                                             |<-Reducer 9 [SIMPLE_EDGE]
-                                                Reduce Output Operator [RS_24]
-                                                   key expressions:_col0 (type: string)
-                                                   Map-reduce partition columns:_col0 (type: string)
-                                                   sort order:+
-                                                   Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                                   value expressions:_col1 (type: int)
-                                                   Select Operator [SEL_22]
-                                                      outputColumnNames:["_col0","_col1"]
+                                             Filter Operator [FIL_26]
+                                                predicate:((_col1 + _col4) >= 0) (type: boolean)
+                                                Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                Merge Join Operator [MERGEJOIN_52]
+                                                |  condition map:[{"":"Left Outer Join0 to 1"}]
+                                                |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)"}
+                                                |  outputColumnNames:["_col0","_col1","_col2","_col4"]
+                                                |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |<-Reducer 3 [SIMPLE_EDGE]
+                                                |  Reduce Output Operator [RS_23]
+                                                |     key expressions:_col0 (type: string)
+                                                |     Map-reduce partition columns:_col0 (type: string)
+                                                |     sort order:+
+                                                |     Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |     value expressions:_col1 (type: int), _col2 (type: bigint)
+                                                |     Select Operator [SEL_11]
+                                                |     |  outputColumnNames:["_col0","_col1","_col2"]
+                                                |     |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |     |<-Reducer 2 [SIMPLE_EDGE]
+                                                |        Reduce Output Operator [RS_10]
+                                                |           key expressions:_col3 (type: bigint), _col1 (type: int)
+                                                |           sort order:+-
+                                                |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |           value expressions:_col0 (type: string), _col2 (type: bigint)
+                                                |           Select Operator [SEL_8]
+                                                |              outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |              Group By Operator [GBY_7]
+                                                |              |  aggregations:["sum(VALUE._col0)"]
+                                                |              |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                                                |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |              |<-Map 1 [SIMPLE_EDGE]
+                                                |                 Reduce Output Operator [RS_6]
+                                                |                    key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                |                    Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                |                    sort order:+++
+                                                |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                    value expressions:_col3 (type: bigint)
+                                                |                    Group By Operator [GBY_5]
+                                                |                       aggregations:["sum(_col2)"]
+                                                |                       keys:_col0 (type: string), _col2 (type: int), _col3 (type: float)
+                                                |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
+                                                |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                       Select Operator [SEL_2]
+                                                |                          outputColumnNames:["_col0","_col2","_col3"]
+                                                |                          Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                          Filter Operator [FIL_49]
+                                                |                             predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                                                |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |                             TableScan [TS_0]
+                                                |                                alias:cbo_t1
+                                                |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                |<-Reducer 9 [SIMPLE_EDGE]
+                                                   Reduce Output Operator [RS_24]
+                                                      key expressions:_col0 (type: string)
+                                                      Map-reduce partition columns:_col0 (type: string)
+                                                      sort order:+
                                                       Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                                      Group By Operator [GBY_21]
-                                                      |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                                                      |  outputColumnNames:["_col0","_col1","_col2"]
-                                                      |  Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                      |<-Map 8 [SIMPLE_EDGE]
-                                                         Reduce Output Operator [RS_20]
-                                                            key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                            Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                            sort order:+++
-                                                            Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                            Group By Operator [GBY_19]
-                                                               keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                                               outputColumnNames:["_col0","_col1","_col2"]
+                                                      value expressions:_col1 (type: int)
+                                                      Select Operator [SEL_21]
+                                                         outputColumnNames:["_col0","_col1"]
+                                                         Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
+                                                         Group By Operator [GBY_20]
+                                                         |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                                                         |  outputColumnNames:["_col0","_col1","_col2"]
+                                                         |  Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                                         |<-Map 8 [SIMPLE_EDGE]
+                                                            Reduce Output Operator [RS_19]
+                                                               key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                               Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                                               sort order:+++
                                                                Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                               Select Operator [SEL_15]
+                                                               Group By Operator [GBY_18]
+                                                                  keys:_col0 (type: string), _col2 (type: int), _col3 (type: float)
                                                                   outputColumnNames:["_col0","_col1","_col2"]
                                                                   Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                                  Filter Operator [FIL_50]
-                                                                     predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                                                                  Select Operator [SEL_15]
+                                                                     outputColumnNames:["_col0","_col2","_col3"]
                                                                      Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                                     TableScan [TS_13]
-                                                                        alias:cbo_t2
-                                                                        Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                     Filter Operator [FIL_50]
+                                                                        predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                                                                        Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                                                        TableScan [TS_13]
+                                                                           alias:cbo_t2
+                                                                           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) cbo_t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) cbo_t2 on cbo_t1.a=p right outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 2) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c
 PREHOOK: type: QUERY
@@ -867,129 +867,132 @@ Stage-0
       limit:-1
       Stage-1
          Reducer 5
-         File Output Operator [FS_36]
+         File Output Operator [FS_35]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Group By Operator [GBY_34]
-            |  aggregations:["count(VALUE._col0)"]
-            |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
-            |  outputColumnNames:["_col0","_col1","_col2"]
-            |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-            |<-Reducer 4 [SIMPLE_EDGE]
-               Reduce Output Operator [RS_33]
-                  key expressions:_col0 (type: int), _col1 (type: bigint)
-                  Map-reduce partition columns:_col0 (type: int), _col1 (type: bigint)
-                  sort order:++
-                  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions:_col2 (type: bigint)
-                  Group By Operator [GBY_32]
-                     aggregations:["count()"]
-                     keys:_col0 (type: int), _col1 (type: bigint)
-                     outputColumnNames:["_col0","_col1","_col2"]
+            Select Operator [SEL_34]
+               outputColumnNames:["_col0","_col1","_col2"]
+               Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+               Group By Operator [GBY_33]
+               |  aggregations:["count(VALUE._col0)"]
+               |  keys:KEY._col0 (type: bigint), KEY._col1 (type: int)
+               |  outputColumnNames:["_col0","_col1","_col2"]
+               |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+               |<-Reducer 4 [SIMPLE_EDGE]
+                  Reduce Output Operator [RS_32]
+                     key expressions:_col0 (type: bigint), _col1 (type: int)
+                     Map-reduce partition columns:_col0 (type: bigint), _col1 (type: int)
+                     sort order:++
                      Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                     Select Operator [SEL_30]
-                        outputColumnNames:["_col0","_col1"]
+                     value expressions:_col2 (type: bigint)
+                     Group By Operator [GBY_31]
+                        aggregations:["count()"]
+                        keys:_col2 (type: bigint), _col6 (type: int)
+                        outputColumnNames:["_col0","_col1","_col2"]
                         Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                        Filter Operator [FIL_29]
-                           predicate:(((_col1 + _col4) >= 2) and ((_col1 > 0) or (_col6 >= 0))) (type: boolean)
+                        Select Operator [SEL_30]
+                           outputColumnNames:["_col2","_col6"]
                            Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                           Merge Join Operator [MERGEJOIN_41]
-                           |  condition map:[{"":"Right Outer Join0 to 1"},{"":"Right Outer Join0 to 2"}]
-                           |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
-                           |  outputColumnNames:["_col1","_col2","_col4","_col6"]
-                           |  Statistics:Num rows: 4 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
-                           |<-Map 8 [SIMPLE_EDGE]
-                           |  Reduce Output Operator [RS_27]
-                           |     key expressions:_col0 (type: string)
-                           |     Map-reduce partition columns:_col0 (type: string)
-                           |     sort order:+
-                           |     Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                           |     value expressions:_col1 (type: int)
-                           |     Select Operator [SEL_24]
-                           |        outputColumnNames:["_col0","_col1"]
-                           |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
-                           |        TableScan [TS_23]
-                           |           alias:cbo_t3
-                           |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                           |<-Reducer 3 [SIMPLE_EDGE]
-                           |  Reduce Output Operator [RS_25]
-                           |     key expressions:_col0 (type: string)
-                           |     Map-reduce partition columns:_col0 (type: string)
-                           |     sort order:+
-                           |     Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                           |     value expressions:_col1 (type: int), _col2 (type: bigint)
-                           |     Select Operator [SEL_11]
-                           |     |  outputColumnNames:["_col0","_col1","_col2"]
-                           |     |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
-                           |     |<-Reducer 2 [SIMPLE_EDGE]
-                           |        Reduce Output Operator [RS_10]
-                           |           key expressions:_col3 (type: bigint), _col0 (type: string)
-                           |           sort order:+-
-                           |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                           |           value expressions:_col1 (type: int), _col2 (type: bigint)
-                           |           Select Operator [SEL_9]
-                           |              outputColumnNames:["_col0","_col1","_col2","_col3"]
-                           |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
-                           |              Group By Operator [GBY_8]
-                           |              |  aggregations:["sum(VALUE._col0)"]
-                           |              |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                           |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
-                           |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                           |              |<-Map 1 [SIMPLE_EDGE]
-                           |                 Reduce Output Operator [RS_7]
-                           |                    key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                           |                    Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                           |                    sort order:+++
-                           |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                           |                    value expressions:_col3 (type: bigint)
-                           |                    Group By Operator [GBY_6]
-                           |                       aggregations:["sum(_col1)"]
-                           |                       keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                           |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
-                           |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
-                           |                       Select Operator [SEL_2]
-                           |                          outputColumnNames:["_col0","_col1","_col2"]
-                           |                          Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                           |                          Filter Operator [FIL_39]
-                           |                             predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
-                           |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                           |                             TableScan [TS_0]
-                           |                                alias:cbo_t1
-                           |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
-                           |<-Reducer 7 [SIMPLE_EDGE]
-                              Reduce Output Operator [RS_26]
-                                 key expressions:_col0 (type: string)
-                                 Map-reduce partition columns:_col0 (type: string)
-                                 sort order:+
-                                 Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                 value expressions:_col1 (type: int)
-                                 Select Operator [SEL_22]
-                                    outputColumnNames:["_col0","_col1"]
+                           Filter Operator [FIL_29]
+                              predicate:(((_col1 + _col4) >= 2) and ((_col1 > 0) or (_col6 >= 0))) (type: boolean)
+                              Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                              Merge Join Operator [MERGEJOIN_40]
+                              |  condition map:[{"":"Right Outer Join0 to 1"},{"":"Right Outer Join0 to 2"}]
+                              |  keys:{"0":"_col0 (type: string)","1":"_col0 (type: string)","2":"_col0 (type: string)"}
+                              |  outputColumnNames:["_col1","_col2","_col4","_col6"]
+                              |  Statistics:Num rows: 4 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE
+                              |<-Map 8 [SIMPLE_EDGE]
+                              |  Reduce Output Operator [RS_27]
+                              |     key expressions:_col0 (type: string)
+                              |     Map-reduce partition columns:_col0 (type: string)
+                              |     sort order:+
+                              |     Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
+                              |     value expressions:_col1 (type: int)
+                              |     Select Operator [SEL_24]
+                              |        outputColumnNames:["_col0","_col1"]
+                              |        Statistics:Num rows: 20 Data size: 1602 Basic stats: COMPLETE Column stats: COMPLETE
+                              |        TableScan [TS_23]
+                              |           alias:cbo_t3
+                              |           Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              |<-Reducer 3 [SIMPLE_EDGE]
+                              |  Reduce Output Operator [RS_25]
+                              |     key expressions:_col0 (type: string)
+                              |     Map-reduce partition columns:_col0 (type: string)
+                              |     sort order:+
+                              |     Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                              |     value expressions:_col1 (type: int), _col2 (type: bigint)
+                              |     Select Operator [SEL_11]
+                              |     |  outputColumnNames:["_col0","_col1","_col2"]
+                              |     |  Statistics:Num rows: 1 Data size: 97 Basic stats: COMPLETE Column stats: COMPLETE
+                              |     |<-Reducer 2 [SIMPLE_EDGE]
+                              |        Reduce Output Operator [RS_10]
+                              |           key expressions:_col3 (type: bigint), _col0 (type: string)
+                              |           sort order:+-
+                              |           Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                              |           value expressions:_col1 (type: int), _col2 (type: bigint)
+                              |           Select Operator [SEL_8]
+                              |              outputColumnNames:["_col0","_col1","_col2","_col3"]
+                              |              Statistics:Num rows: 1 Data size: 105 Basic stats: COMPLETE Column stats: COMPLETE
+                              |              Group By Operator [GBY_7]
+                              |              |  aggregations:["sum(VALUE._col0)"]
+                              |              |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                              |              |  outputColumnNames:["_col0","_col1","_col2","_col3"]
+                              |              |  Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                              |              |<-Map 1 [SIMPLE_EDGE]
+                              |                 Reduce Output Operator [RS_6]
+                              |                    key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                              |                    Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                              |                    sort order:+++
+                              |                    Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                    value expressions:_col3 (type: bigint)
+                              |                    Group By Operator [GBY_5]
+                              |                       aggregations:["sum(_col2)"]
+                              |                       keys:_col0 (type: string), _col2 (type: int), _col3 (type: float)
+                              |                       outputColumnNames:["_col0","_col1","_col2","_col3"]
+                              |                       Statistics:Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                       Select Operator [SEL_2]
+                              |                          outputColumnNames:["_col0","_col2","_col3"]
+                              |                          Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                          Filter Operator [FIL_38]
+                              |                             predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                              |                             Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                              |                             TableScan [TS_0]
+                              |                                alias:cbo_t1
+                              |                                Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                              |<-Reducer 7 [SIMPLE_EDGE]
+                                 Reduce Output Operator [RS_26]
+                                    key expressions:_col0 (type: string)
+                                    Map-reduce partition columns:_col0 (type: string)
+                                    sort order:+
                                     Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
-                                    Group By Operator [GBY_21]
-                                    |  keys:KEY._col0 (type: float), KEY._col1 (type: int), KEY._col2 (type: string)
-                                    |  outputColumnNames:["_col0","_col1","_col2"]
-                                    |  Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                    |<-Map 6 [SIMPLE_EDGE]
-                                       Reduce Output Operator [RS_20]
-                                          key expressions:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                          Map-reduce partition columns:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                          sort order:+++
-                                          Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                          Group By Operator [GBY_19]
-                                             keys:_col0 (type: float), _col1 (type: int), _col2 (type: string)
-                                             outputColumnNames:["_col0","_col1","_col2"]
+                                    value expressions:_col1 (type: int)
+                                    Select Operator [SEL_21]
+                                       outputColumnNames:["_col0","_col1"]
+                                       Statistics:Num rows: 1 Data size: 89 Basic stats: COMPLETE Column stats: COMPLETE
+                                       Group By Operator [GBY_20]
+                                       |  keys:KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: float)
+                                       |  outputColumnNames:["_col0","_col1","_col2"]
+                                       |  Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                       |<-Map 6 [SIMPLE_EDGE]
+                                          Reduce Output Operator [RS_19]
+                                             key expressions:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                             Map-reduce partition columns:_col0 (type: string), _col1 (type: int), _col2 (type: float)
+                                             sort order:+++
                                              Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                             Select Operator [SEL_15]
+                                             Group By Operator [GBY_18]
+                                                keys:_col0 (type: string), _col2 (type: int), _col3 (type: float)
                                                 outputColumnNames:["_col0","_col1","_col2"]
                                                 Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                Filter Operator [FIL_40]
-                                                   predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                                                Select Operator [SEL_15]
+                                                   outputColumnNames:["_col0","_col2","_col3"]
                                                    Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
-                                                   TableScan [TS_13]
-                                                      alias:cbo_t2
-                                                      Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
+                                                   Filter Operator [FIL_39]
+                                                      predicate:((((((c_int + 1) >= 0) and ((c_int > 0) or (c_float >= 0.0))) and (c_float > 0.0)) and ((c_int >= 1) or (c_float >= 1.0))) and ((UDFToFloat(c_int) + c_float) >= 0.0)) (type: boolean)
+                                                      Statistics:Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE
+                                                      TableScan [TS_13]
+                                                         alias:cbo_t2
+                                                         Statistics:Num rows: 20 Data size: 262 Basic stats: COMPLETE Column stats: COMPLETE
 
 PREHOOK: query: explain select cbo_t3.c_int, c, count(*) from (select key as a, c_int+1 as b, sum(c_int) as c from cbo_t1 where (cbo_t1.c_int + 1 >= 0) and (cbo_t1.c_int > 0 or cbo_t1.c_float >= 0)  group by c_float, cbo_t1.c_int, key having cbo_t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) cbo_t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from cbo_t2 where (cbo_t2.c_int + 1 >= 0) and (cbo_t2.c_int > 0 or cbo_t2.c_float >= 0)  group by c_float, cbo_t2.c_int, key having cbo_t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) cbo_t2 on cbo_t1.a=p full outer join cbo_t3 on cbo_t1.a=key where (b + cbo_t2.q >= 0) and (b > 0 or c_int >= 0) group by cbo_t3.c_int, c having cbo_t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by cbo_t3.c_int
 PREHOOK: type: QUERY
@@ -1011,150 +1014,153 @@ Stage-0
       limit:-1
       Stage-1
          Reducer 6
-         File Output Operator [FS_43]
+         File Output Operator [FS_42]
             compressed:false
             Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
             table:{"input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat","serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"}
-            Select Operator [SEL_42]
+            Select Operator [SEL_41]
             |  outputColumnNames:["_col0","_col1","_col2"]
             |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
             |<-Reducer 5 [SIMPLE_EDGE]
-               Reduce Output Operator [RS_41]
+               Reduce Output Operator [RS_40]
                   key expressions:_col0 (type: int)
                   sort order:+
                   Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions:_col1 (type: bigint), _col2 (type: bigint)
-                  Group By Operator [GBY_39]
-                  |  aggregations:["count(VALUE._col0)"]
-                  |  keys:KEY._col0 (type: int), KEY._col1 (type: bigint)
-                  |  outputColumnNames:["_col0","_col1","_col2"]
-                  |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                  |<-Reducer 4 [SIMPLE_EDGE]
-                     Reduce Output Operator [RS_38]
-                        key expressions:_col0 (type: int), _col1 (type: bigint)
-                        Map-reduce partition columns:_col0 (type: int), _col1 (type: bigint)
-                        sort order:++
-                        Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                        value expressions:_col2 (type: bigint)
-                        Group By Operator [GBY_37]
-                           aggregations:["count()"]
-                           keys:_col0 (type: int), _col1 (type: bigint)
-                           outputColumnNames:["_col0","_col1","_col2"]
+                  Select Operator [SEL_39]
+                     outputColumnNames:["_col0","_col1","_col2"]
+                     Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     Group By Operator [GBY_38]
+                     |  aggregations:["count(VALUE._col0)"]
+                     |  keys:KEY._col0 (type: bigint), KEY._col1 (type: int)
+                     |  outputColumnNames:["_col0","_col1","_col2"]
+                     |  Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
+                     |<-Reducer 4 [SIMPLE_EDGE]
+                        Reduce Output Operator [RS_37]
+                           key expressions:_col0 (type: bigint), _col1 (type: int)
+                           Map-reduce partition columns:_col0 (type: bigint), _col1 (type: int)
+                           sort order:++
                            Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                           Select Operator [SEL_33]
-                              outputColumnNames:["_col0","_col1"]
+                           value expressions:_col2 (type: bigint)
+                           Group By Operator [GBY_36]
+                              aggregations:["count()"]
+                              keys:_col2 (type: bigint), _col6 (type: int)
+                              outputColumnNames:["_col0","_col1","_col2"]
                               Statistics:Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                              Filter Operator [FIL_45]
-                                 predicate:(((((_col6 > 0) and ((_col6 >= 1) or (_col2 >= 1))) and ((UDFToLong(_col6)

<TRUNCATED>