Posted to commits@hive.apache.org by jd...@apache.org on 2016/03/17 23:47:05 UTC

[01/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Repository: hive
Updated Branches:
  refs/heads/llap 81b26df9e -> 2945c3b2d


http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
index de39d3d..d6dc079 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/DBTokenStore.java
@@ -33,13 +33,14 @@ import org.slf4j.LoggerFactory;
 
 public class DBTokenStore implements DelegationTokenStore {
   private static final Logger LOG = LoggerFactory.getLogger(DBTokenStore.class);
+  private Configuration conf;
 
   @Override
   public int addMasterKey(String s) throws TokenStoreException {
     if (LOG.isTraceEnabled()) {
       LOG.trace("addMasterKey: s = " + s);
     }
-    return (Integer)invokeOnRawStore("addMasterKey", new Object[]{s},String.class);
+    return (Integer)invokeOnTokenStore("addMasterKey", new Object[]{s},String.class);
   }
 
   @Override
@@ -47,19 +48,19 @@ public class DBTokenStore implements DelegationTokenStore {
     if (LOG.isTraceEnabled()) {
       LOG.trace("updateMasterKey: s = " + s + ", keySeq = " + keySeq);
     }
-    invokeOnRawStore("updateMasterKey", new Object[] {Integer.valueOf(keySeq), s},
+    invokeOnTokenStore("updateMasterKey", new Object[] {Integer.valueOf(keySeq), s},
         Integer.class, String.class);
   }
 
   @Override
   public boolean removeMasterKey(int keySeq) {
-    return (Boolean)invokeOnRawStore("removeMasterKey", new Object[] {Integer.valueOf(keySeq)},
+    return (Boolean)invokeOnTokenStore("removeMasterKey", new Object[] {Integer.valueOf(keySeq)},
       Integer.class);
   }
 
   @Override
   public String[] getMasterKeys() throws TokenStoreException {
-    return (String[])invokeOnRawStore("getMasterKeys", new Object[0]);
+    return (String[])invokeOnTokenStore("getMasterKeys", new Object[0]);
   }
 
   @Override
@@ -70,7 +71,7 @@ public class DBTokenStore implements DelegationTokenStore {
       String identifier = TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier);
       String tokenStr = Base64.encodeBase64URLSafeString(
         HiveDelegationTokenSupport.encodeDelegationTokenInformation(token));
-      boolean result = (Boolean)invokeOnRawStore("addToken", new Object[] {identifier, tokenStr},
+      boolean result = (Boolean)invokeOnTokenStore("addToken", new Object[] {identifier, tokenStr},
         String.class, String.class);
       if (LOG.isTraceEnabled()) {
         LOG.trace("addToken: tokenIdentifier = " + tokenIdentifier + ", added = " + result);
@@ -85,7 +86,7 @@ public class DBTokenStore implements DelegationTokenStore {
   public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier)
       throws TokenStoreException {
     try {
-      String tokenStr = (String)invokeOnRawStore("getToken", new Object[] {
+      String tokenStr = (String)invokeOnTokenStore("getToken", new Object[] {
           TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier)}, String.class);
       DelegationTokenInformation result = null;
       if (tokenStr != null) {
@@ -103,7 +104,7 @@ public class DBTokenStore implements DelegationTokenStore {
   @Override
   public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) throws TokenStoreException{
     try {
-      boolean result = (Boolean)invokeOnRawStore("removeToken", new Object[] {
+      boolean result = (Boolean)invokeOnTokenStore("removeToken", new Object[] {
         TokenStoreDelegationTokenSecretManager.encodeWritable(tokenIdentifier)}, String.class);
       if (LOG.isTraceEnabled()) {
         LOG.trace("removeToken: tokenIdentifier = " + tokenIdentifier + ", removed = " + result);
@@ -117,7 +118,7 @@ public class DBTokenStore implements DelegationTokenStore {
   @Override
   public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() throws TokenStoreException{
 
-    List<String> tokenIdents = (List<String>)invokeOnRawStore("getAllTokenIdentifiers", new Object[0]);
+    List<String> tokenIdents = (List<String>)invokeOnTokenStore("getAllTokenIdentifiers", new Object[0]);
     List<DelegationTokenIdentifier> delTokenIdents = new ArrayList<DelegationTokenIdentifier>(tokenIdents.size());
 
     for (String tokenIdent : tokenIdents) {
@@ -132,19 +133,33 @@ public class DBTokenStore implements DelegationTokenStore {
     return delTokenIdents;
   }
 
-  private Object hmsHandler;
+  private Object handler;
+  private ServerMode smode;
 
   @Override
-  public void init(Object hms, ServerMode smode) throws TokenStoreException {
-    this.hmsHandler = hms;
+  public void init(Object handler, ServerMode smode) throws TokenStoreException {
+    this.handler = handler;
+    this.smode = smode;
   }
 
-  private Object invokeOnRawStore(String methName, Object[] params, Class<?> ... paramTypes)
+  private Object invokeOnTokenStore(String methName, Object[] params, Class<?> ... paramTypes)
       throws TokenStoreException{
-
+    Object tokenStore;
     try {
-      Object rawStore = hmsHandler.getClass().getMethod("getMS").invoke(hmsHandler);
-      return rawStore.getClass().getMethod(methName, paramTypes).invoke(rawStore, params);
+      switch (smode) {
+        case METASTORE :
+          tokenStore = handler.getClass().getMethod("getMS").invoke(handler);
+          break;
+        case HIVESERVER2 :
+          Object hiveObject = ((Class<?>)handler)
+            .getMethod("get", org.apache.hadoop.conf.Configuration.class, java.lang.Class.class)
+            .invoke(handler, conf, DBTokenStore.class);
+          tokenStore = ((Class<?>)handler).getMethod("getMSC").invoke(hiveObject);
+          break;
+       default:
+         throw new TokenStoreException(new Exception("unknown server mode"));
+      }
+      return tokenStore.getClass().getMethod(methName, paramTypes).invoke(tokenStore, params);
     } catch (IllegalArgumentException e) {
         throw new TokenStoreException(e);
     } catch (SecurityException e) {
@@ -160,12 +175,12 @@ public class DBTokenStore implements DelegationTokenStore {
 
   @Override
   public void setConf(Configuration conf) {
-    // No-op
+    this.conf = conf;
   }
 
   @Override
   public Configuration getConf() {
-    return null;
+    return conf;
   }
 
   @Override

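For readers skimming the patch, the heart of the change is the new dispatch in invokeOnTokenStore(): in METASTORE mode the handler is an instance whose getMS() returns the store object, while in HIVESERVER2 mode the handler is a Class whose static get(Configuration, Class) factory is invoked reflectively, after which getMSC() yields the metastore client. A minimal, self-contained Java sketch of that pattern (hypothetical names throughout, not the actual Hive classes; conf is typed Object here, where the patch passes an org.apache.hadoop.conf.Configuration):

public class TokenStoreDispatchSketch {

  enum ServerMode { METASTORE, HIVESERVER2 }

  // Resolve the object that actually implements addToken/getToken/etc.
  static Object resolveTokenStore(Object handler, ServerMode smode, Object conf)
      throws Exception {
    switch (smode) {
      case METASTORE:
        // handler is an instance; its no-arg getMS() returns the store.
        return handler.getClass().getMethod("getMS").invoke(handler);
      case HIVESERVER2:
        // handler is a Class; call its static factory, then getMSC().
        Class<?> clazz = (Class<?>) handler;
        Object singleton = clazz
            .getMethod("get", Object.class, Class.class) // hypothetical signature
            .invoke(null, conf, clazz); // first invoke() arg is ignored for statics
        return clazz.getMethod("getMSC").invoke(singleton);
      default:
        throw new IllegalStateException("unknown server mode: " + smode);
    }
  }

  // Every store operation then becomes a single reflective call by name:
  static Object call(Object store, String method, Object[] args, Class<?>... types)
      throws Exception {
    return store.getClass().getMethod(method, types).invoke(store, args);
  }
}

Because everything is resolved by name, the shims module needs no compile-time dependency on either server's classes, which is presumably why the patch keeps the reflective style rather than calling the stores directly.
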
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
index 9ecb0ee..b3e4a76 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/thrift/HiveDelegationTokenManager.java
@@ -89,6 +89,7 @@ public class HiveDelegationTokenManager {
         conf.getLong(DELEGATION_TOKEN_GC_INTERVAL, DELEGATION_TOKEN_GC_INTERVAL_DEFAULT);
 
     DelegationTokenStore dts = getTokenStore(conf);
+    dts.setConf(conf);
     dts.init(hms, smode);
     secretManager =
         new TokenStoreDelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime,

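Note the ordering in this hunk: setConf(conf) now runs before init(hms, smode). That matters for the DBTokenStore change above, where the HIVESERVER2 branch passes the stored Configuration to the reflective get(Configuration, Class) factory call; the previous no-op setConf() would have left it null.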

[15/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
index c64379d..1aa9727 100644
--- a/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
+++ b/ql/src/test/results/clientpositive/udf_unix_timestamp.q.out
@@ -88,6 +88,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@oneline
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@foo
+POSTHOOK: Lineage: foo.a SIMPLE []
+POSTHOOK: Lineage: foo.b EXPRESSION []
 PREHOOK: query: drop table foo
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@foo

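A note on reading the new POSTHOOK: Lineage lines, here and in the rest of this commit's golden-file updates: each line records, for one column of the CTAS target, the dependency type and the base columns it traces to. SIMPLE marks a direct copy of a source column, EXPRESSION a value computed from its inputs (a UDF call, an aggregate, a union branch), and the bracketed list names the contributing source FieldSchemas; it is empty when the value does not trace back to any stored column.
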
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out
index 9147d4f..993b838 100644
--- a/ql/src/test/results/clientpositive/union24.q.out
+++ b/ql/src/test/results/clientpositive/union24.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src2
+POSTHOOK: Lineage: src2.count EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table src3 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src3
+POSTHOOK: Lineage: src3.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src3.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src4 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src4
+POSTHOOK: Lineage: src4.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src4.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src5 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -42,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src5.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: explain extended
 select s.key, s.count from (
   select key, count from src2  where key < 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out
index 508582b..f023360 100644
--- a/ql/src/test/results/clientpositive/union27.q.out
+++ b/ql/src/test/results/clientpositive/union27.q.out
@@ -10,6 +10,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_same
+POSTHOOK: Lineage: jackson_sev_same.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_same.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table dim_pho as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -20,6 +22,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dim_pho
+POSTHOOK: Lineage: dim_pho.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dim_pho.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table jackson_sev_add as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -30,6 +34,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_add
+POSTHOOK: Lineage: jackson_sev_add.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_add.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union31.q.out b/ql/src/test/results/clientpositive/union31.q.out
index 9b970c0..bb35d5c 100644
--- a/ql/src/test/results/clientpositive/union31.q.out
+++ b/ql/src/test/results/clientpositive/union31.q.out
@@ -20,6 +20,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select * from src where key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -30,6 +32,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t3(key string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -595,6 +599,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -605,6 +611,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table t7(c1 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union32.q.out b/ql/src/test/results/clientpositive/union32.q.out
index 444a84c..a3fefa8 100644
--- a/ql/src/test/results/clientpositive/union32.q.out
+++ b/ql/src/test/results/clientpositive/union32.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- Test simple union with double
 EXPLAIN
 SELECT * FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/unionDistinct_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/unionDistinct_1.q.out
index d6e82dd..61bfa74 100644
--- a/ql/src/test/results/clientpositive/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/unionDistinct_1.q.out
@@ -8400,6 +8400,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src2
+POSTHOOK: Lineage: src2.count EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table src3 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -8410,6 +8412,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src3
+POSTHOOK: Lineage: src3.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src3.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src4 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -8420,6 +8424,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src4
+POSTHOOK: Lineage: src4.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src4.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src5 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -8430,6 +8436,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src5.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: explain extended
 select s.key, s.count from (
   select key, count from src2  where key < 10
@@ -11968,6 +11976,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_same
+POSTHOOK: Lineage: jackson_sev_same.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_same.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table dim_pho as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -11978,6 +11988,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dim_pho
+POSTHOOK: Lineage: dim_pho.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dim_pho.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table jackson_sev_add as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -11988,6 +12000,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_add
+POSTHOOK: Lineage: jackson_sev_add.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_add.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho UNION DISTINCT select * from jackson_sev_add)b on a.key=b.key and b.key=97
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho UNION DISTINCT select * from jackson_sev_add)b on a.key=b.key and b.key=97
@@ -13396,6 +13410,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select * from src where key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -13406,6 +13422,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t3(key string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -13984,6 +14002,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -13994,6 +14014,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table t7(c1 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -14309,6 +14331,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -14319,6 +14343,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- Test simple union with double
 EXPLAIN
 SELECT * FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/unionDistinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/unionDistinct_2.q.out b/ql/src/test/results/clientpositive/unionDistinct_2.q.out
index 6d59369..304d74f 100644
--- a/ql/src/test/results/clientpositive/unionDistinct_2.q.out
+++ b/ql/src/test/results/clientpositive/unionDistinct_2.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u1
+POSTHOOK: Lineage: u1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u2 as select key, value from src order by key limit 3
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u2
+POSTHOOK: Lineage: u2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u3 as select key, value from src order by key desc limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u3
+POSTHOOK: Lineage: u3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from u1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union_fast_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_fast_stats.q.out b/ql/src/test/results/clientpositive/union_fast_stats.q.out
index e908ec0..f0879af 100644
--- a/ql/src/test/results/clientpositive/union_fast_stats.q.out
+++ b/ql/src/test/results/clientpositive/union_fast_stats.q.out
@@ -28,6 +28,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -38,6 +50,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -48,6 +72,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -58,6 +94,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -88,6 +136,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: desc formatted small_alltypesorc_a
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@small_alltypesorc_a
@@ -301,6 +361,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -311,6 +383,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -321,6 +405,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -331,6 +427,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -361,6 +469,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: desc formatted small_alltypesorc_a
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_top_level.q.out b/ql/src/test/results/clientpositive/union_top_level.q.out
index 11a3b6a..134c532 100644
--- a/ql/src/test/results/clientpositive/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/union_top_level.q.out
@@ -721,6 +721,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@union_top
+POSTHOOK: Lineage: union_top.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: union_top.value EXPRESSION []
 PREHOOK: query: select * from union_top
 PREHOOK: type: QUERY
 PREHOOK: Input: default@union_top

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/updateAccessTime.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/updateAccessTime.q.out b/ql/src/test/results/clientpositive/updateAccessTime.q.out
index 104e155..05f4d07 100644
--- a/ql/src/test/results/clientpositive/updateAccessTime.q.out
+++ b/ql/src/test/results/clientpositive/updateAccessTime.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tstsrc
+POSTHOOK: Lineage: tstsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: tstsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: desc extended tstsrc
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@tstsrc

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/updateBasicStats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/updateBasicStats.q.out b/ql/src/test/results/clientpositive/updateBasicStats.q.out
index 3f04b99..596de00 100644
--- a/ql/src/test/results/clientpositive/updateBasicStats.q.out
+++ b/ql/src/test/results/clientpositive/updateBasicStats.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select * from s
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from s

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/varchar_nested_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/varchar_nested_types.q.out b/ql/src/test/results/clientpositive/varchar_nested_types.q.out
index 1cd232a..e911425 100644
--- a/ql/src/test/results/clientpositive/varchar_nested_types.q.out
+++ b/ql/src/test/results/clientpositive/varchar_nested_types.q.out
@@ -173,6 +173,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_nested_struct
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_nested_cta
+POSTHOOK: Lineage: varchar_nested_cta.c1 SIMPLE [(varchar_nested_struct)varchar_nested_struct.FieldSchema(name:c1, type:struct<a:int,b:varchar(20),c:string>, comment:null), ]
 PREHOOK: query: describe varchar_nested_cta
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@varchar_nested_cta
@@ -203,6 +204,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_nested_struct
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_nested_view
+POSTHOOK: Lineage: varchar_nested_view.c1 SIMPLE [(varchar_nested_struct)varchar_nested_struct.FieldSchema(name:c1, type:struct<a:int,b:varchar(20),c:string>, comment:null), ]
 PREHOOK: query: describe varchar_nested_view
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@varchar_nested_view

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_columns.q.out b/ql/src/test/results/clientpositive/vector_between_columns.q.out
index d8cc2cc..a4e8d64 100644
--- a/ql/src/test/results/clientpositive/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_columns.q.out
@@ -48,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tsint_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TSINT
+POSTHOOK: Lineage: tsint.csint SIMPLE [(tsint_txt)tsint_txt.FieldSchema(name:csint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: tsint.rnum SIMPLE [(tsint_txt)tsint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tsint_txt.rnum	tsint_txt.csint
 PREHOOK: query: create table TINT stored as orc AS SELECT * FROM TINT_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -59,6 +61,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tint_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TINT
+POSTHOOK: Lineage: tint.cint SIMPLE [(tint_txt)tint_txt.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: tint.rnum SIMPLE [(tint_txt)tint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tint_txt.rnum	tint_txt.cint
 Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: -- We DO NOT expect the following to be vectorized because the BETWEEN range expressions

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out
index 3cd1e01..4c3ed71 100644
--- a/ql/src/test/results/clientpositive/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/vector_between_in.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_date_test
+POSTHOOK: Lineage: decimal_date_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
index 19ab74b..000a501 100644
--- a/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_mapjoin1.q.out
@@ -98,6 +98,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_vc1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_vc1_orc
+POSTHOOK: Lineage: char_join1_vc1_orc.c1 SIMPLE [(char_join1_vc1)char_join1_vc1.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_vc1_orc.c2 SIMPLE [(char_join1_vc1)char_join1_vc1.FieldSchema(name:c2, type:char(10), comment:null), ]
 PREHOOK: query: create table char_join1_vc2_orc stored as orc as select * from char_join1_vc2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@char_join1_vc2
@@ -108,6 +110,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_vc2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_vc2_orc
+POSTHOOK: Lineage: char_join1_vc2_orc.c1 SIMPLE [(char_join1_vc2)char_join1_vc2.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_vc2_orc.c2 SIMPLE [(char_join1_vc2)char_join1_vc2.FieldSchema(name:c2, type:char(20), comment:null), ]
 PREHOOK: query: create table char_join1_str_orc stored as orc as select * from char_join1_str
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@char_join1_str
@@ -118,6 +122,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_str
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_str_orc
+POSTHOOK: Lineage: char_join1_str_orc.c1 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_str_orc.c2 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join char with same length char
 explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
index 52e015e..2ee396b 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out
@@ -32,6 +32,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL
+POSTHOOK: Lineage: decimal.dec SIMPLE [(decimal_txt)decimal_txt.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
 PREHOOK: query: EXPLAIN
 SELECT dec FROM `DECIMAL` order by dec
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
index eea91bb..537d568 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_3.q.out
@@ -38,6 +38,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_3_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_3
+POSTHOOK: Lineage: decimal_3.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: decimal_3.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
index e0ccbc6..15c9757 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out
@@ -258,6 +258,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_6_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_6_3
+POSTHOOK: Lineage: decimal_6_3.k EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:key, type:decimal(10,5), comment:null), ]
+POSTHOOK: Lineage: decimal_6_3.v EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: desc DECIMAL_6_3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@decimal_6_3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
index 290af26..b022435 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out
@@ -16,6 +16,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_vgby
+POSTHOOK: Lineage: decimal_vgby.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
index f207ba1..3ca326d 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
@@ -12,6 +12,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
index e851aa4..0b70d4c 100644
--- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
 explain 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
index ceb3a58..4207c19 100644
--- a/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/vector_grouping_sets.q.out
@@ -98,6 +98,35 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@store_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@store
+POSTHOOK: Lineage: store.s_city SIMPLE [(store_txt)store_txt.FieldSchema(name:s_city, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_closed_date_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_closed_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_company_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_company_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_country SIMPLE [(store_txt)store_txt.FieldSchema(name:s_country, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_county SIMPLE [(store_txt)store_txt.FieldSchema(name:s_county, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_division_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_division_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_floor_space SIMPLE [(store_txt)store_txt.FieldSchema(name:s_floor_space, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_geography_class SIMPLE [(store_txt)store_txt.FieldSchema(name:s_geography_class, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_gmt_offset SIMPLE [(store_txt)store_txt.FieldSchema(name:s_gmt_offset, type:decimal(5,2), comment:null), ]
+POSTHOOK: Lineage: store.s_hours SIMPLE [(store_txt)store_txt.FieldSchema(name:s_hours, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_manager, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_market_desc SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_desc, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_market_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_market_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_manager, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_number_employees SIMPLE [(store_txt)store_txt.FieldSchema(name:s_number_employees, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_rec_end_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_end_date, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_rec_start_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_start_date, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_state SIMPLE [(store_txt)store_txt.FieldSchema(name:s_state, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_id, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_street_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_street_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_number, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_street_type SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_type, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_suite_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_suite_number, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_tax_precentage SIMPLE [(store_txt)store_txt.FieldSchema(name:s_tax_precentage, type:decimal(5,2), comment:null), ]
+POSTHOOK: Lineage: store.s_zip SIMPLE [(store_txt)store_txt.FieldSchema(name:s_zip, type:string, comment:null), ]
 PREHOOK: query: explain
 select s_store_id
  from store

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
index 9b6bc6c..2223e81 100644
--- a/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_interval_mapjoin.q.out
@@ -54,6 +54,19 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@vectortab_a_1k
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vectortab_a_1korc
+POSTHOOK: Lineage: vectortab_a_1korc.b SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.bo SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.d SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.dc SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.dt SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.f SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.i SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.s SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.s2 SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.si SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.t SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.ts SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.ts2 SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
 PREHOOK: query: create table vectortab_b_1k(
             t tinyint,
             si smallint,
@@ -110,6 +123,19 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@vectortab_b_1k
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vectortab_b_1korc
+POSTHOOK: Lineage: vectortab_b_1korc.b SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.bo SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.d SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.dc SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.dt SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.f SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.i SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.s SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.s2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.si SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.t SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.ts SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.ts2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
 PREHOOK: query: explain
 select
    v1.s,

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join30.q.out b/ql/src/test/results/clientpositive/vector_join30.q.out
index fd5ca72..45ed894 100644
--- a/ql/src/test/results/clientpositive/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/vector_join30.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orcsrc
+POSTHOOK: Lineage: orcsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join_filters.q.out b/ql/src/test/results/clientpositive/vector_join_filters.q.out
index 61e5b2a..999fee7 100644
--- a/ql/src/test/results/clientpositive/vector_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/vector_join_filters.q.out
@@ -28,6 +28,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[21][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_join_nulls.q.out b/ql/src/test/results/clientpositive/vector_join_nulls.q.out
index 7b59cc4..9011a1f 100644
--- a/ql/src/test/results/clientpositive/vector_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/vector_join_nulls.q.out
@@ -28,6 +28,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
index b019822..9836538 100644
--- a/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/vector_leftsemi_mapjoin.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from t1 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -41,6 +43,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t2 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t2
@@ -72,6 +76,8 @@ POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t3
+POSTHOOK: Lineage: t3.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), (t2)t2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t3.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), (t2)t2.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t3 sort by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
index 78456c7..e9f106d 100644
--- a/ql/src/test/results/clientpositive/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/vector_multi_insert.q.out
@@ -32,6 +32,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc1
+POSTHOOK: Lineage: orc1.rn EXPRESSION []
 PREHOOK: query: create table orc_rn1 (rn int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_nullsafe_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_nullsafe_join.q.out b/ql/src/test/results/clientpositive/vector_nullsafe_join.q.out
index 56cb050..2090c24 100644
--- a/ql/src/test/results/clientpositive/vector_nullsafe_join.q.out
+++ b/ql/src/test/results/clientpositive/vector_nullsafe_join.q.out
@@ -46,6 +46,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: -- merging
 explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 PREHOOK: type: QUERY


[11/51] [abbrv] hive git commit: HIVE-13222: Move rc-file-v0.rc used on TestRCFile.java to src/test/resources (Sergio Pena, reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-13222: Move rc-file-v0.rc used on TestRCFile.java to src/test/resources (Sergio Pena, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/61b66449
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/61b66449
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/61b66449

Branch: refs/heads/llap
Commit: 61b66449b12ce48fbdfcea696e9a0bdbaa6c4290
Parents: 415373b
Author: Sergio Pena <se...@cloudera.com>
Authored: Wed Mar 9 09:50:09 2016 -0600
Committer: Sergio Pena <se...@cloudera.com>
Committed: Wed Mar 9 09:50:09 2016 -0600

----------------------------------------------------------------------
 ql/src/test/data/rc-file-v0.rc                       | Bin 216 -> 0 bytes
 .../org/apache/hadoop/hive/ql/io/TestRCFile.java     |   3 ++-
 ql/src/test/resources/rc-file-v0.rc                  | Bin 0 -> 216 bytes
 3 files changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/61b66449/ql/src/test/data/rc-file-v0.rc
----------------------------------------------------------------------
diff --git a/ql/src/test/data/rc-file-v0.rc b/ql/src/test/data/rc-file-v0.rc
deleted file mode 100644
index 767d83e..0000000
Binary files a/ql/src/test/data/rc-file-v0.rc and /dev/null differ

http://git-wip-us.apache.org/repos/asf/hive/blob/61b66449/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
index 2914194..19b97e4 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
+import org.apache.hive.common.util.HiveTestUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -391,7 +392,7 @@ public class TestRCFile {
     String[] row = new String[]{"Tester", "Bart", "333 X St.", "Reno", "NV",
                                 "USA"};
     RCFile.Reader reader =
-      new RCFile.Reader(fs, new Path("src/test/data/rc-file-v0.rc"), conf);
+      new RCFile.Reader(fs, new Path(HiveTestUtils.getFileFromClasspath("rc-file-v0.rc")), conf);
     LongWritable rowID = new LongWritable();
     BytesRefArrayWritable cols = new BytesRefArrayWritable();
     assertTrue("old file reader first row", reader.next(rowID));
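
The change above swaps a working-directory-relative path for a classpath lookup: once rc-file-v0.rc lives under src/test/resources it is on the test classpath, so the test resolves it no matter where the JVM was started. A minimal sketch of such a helper, assuming the usual context-classloader lookup (the actual HiveTestUtils.getFileFromClasspath implementation may differ):

    import java.net.URL;

    final class ClasspathFiles {
      // Resolve a test resource by name via the classpath rather than a relative path.
      static String fileFromClasspath(String name) {
        URL url = Thread.currentThread().getContextClassLoader().getResource(name);
        if (url == null) {
          throw new IllegalArgumentException("not on test classpath: " + name);
        }
        return url.getPath();
      }
    }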

http://git-wip-us.apache.org/repos/asf/hive/blob/61b66449/ql/src/test/resources/rc-file-v0.rc
----------------------------------------------------------------------
diff --git a/ql/src/test/resources/rc-file-v0.rc b/ql/src/test/resources/rc-file-v0.rc
new file mode 100644
index 0000000..767d83e
Binary files /dev/null and b/ql/src/test/resources/rc-file-v0.rc differ


[43/51] [abbrv] hive git commit: HIVE-13232. Aggressively drop compression buffers in ORC OutStreams. (omalley reviewed by prasanthj)

Posted by jd...@apache.org.
HIVE-13232. Aggressively drop compression buffers in ORC OutStreams.
(omalley reviewed by prasanthj)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4458b1a2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4458b1a2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4458b1a2

Branch: refs/heads/llap
Commit: 4458b1a2b52ce0dfd17504775a016d46a12bc46b
Parents: 8128200
Author: Owen O'Malley <om...@apache.org>
Authored: Fri Mar 11 10:38:34 2016 -0800
Committer: Owen O'Malley <om...@apache.org>
Committed: Tue Mar 15 13:42:29 2016 -0700

----------------------------------------------------------------------
 orc/src/java/org/apache/orc/impl/OutStream.java |  2 +-
 .../test/org/apache/orc/impl/TestOutStream.java | 43 ++++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4458b1a2/orc/src/java/org/apache/orc/impl/OutStream.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/OutStream.java b/orc/src/java/org/apache/orc/impl/OutStream.java
index 68ef7d4..81662cc 100644
--- a/orc/src/java/org/apache/orc/impl/OutStream.java
+++ b/orc/src/java/org/apache/orc/impl/OutStream.java
@@ -243,8 +243,8 @@ public class OutStream extends PositionedOutputStream {
     if (compressed != null && compressed.position() != 0) {
       compressed.flip();
       receiver.output(compressed);
-      compressed = null;
     }
+    compressed = null;
     uncompressedBytes = 0;
     compressedBytes = 0;
     overflow = null;
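
The one-line move above is the whole fix: compressed = null now runs unconditionally, so the stream drops its compression buffer reference even when there was nothing to flush, instead of keeping an empty buffer pinned until the next write. A self-contained sketch of the corrected pattern, with illustrative names rather than the real OutStream internals:

    import java.nio.ByteBuffer;

    final class ScratchStream {
      private final int bufferSize;
      private ByteBuffer compressed;   // lazily allocated scratch buffer

      ScratchStream(int bufferSize) { this.bufferSize = bufferSize; }

      void write(byte b) {
        if (compressed == null) {
          compressed = ByteBuffer.allocate(bufferSize);
        }
        compressed.put(b);
      }

      void flush() {
        if (compressed != null && compressed.position() != 0) {
          compressed.flip();
          // ... hand the flipped buffer off to a receiver here ...
        }
        compressed = null;   // dropped even when empty, so nothing pins the memory
      }

      // Analogous to the getBufferSize() assertion in the new test below.
      long retainedBytes() {
        return compressed == null ? 0 : compressed.capacity();
      }
    }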

http://git-wip-us.apache.org/repos/asf/hive/blob/4458b1a2/orc/src/test/org/apache/orc/impl/TestOutStream.java
----------------------------------------------------------------------
diff --git a/orc/src/test/org/apache/orc/impl/TestOutStream.java b/orc/src/test/org/apache/orc/impl/TestOutStream.java
new file mode 100644
index 0000000..e9614d5
--- /dev/null
+++ b/orc/src/test/org/apache/orc/impl/TestOutStream.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.orc.impl;
+
+import org.apache.orc.CompressionCodec;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestOutStream {
+
+  @Test
+  public void testFlush() throws Exception {
+    OutStream.OutputReceiver receiver =
+        Mockito.mock(OutStream.OutputReceiver.class);
+    CompressionCodec codec = new ZlibCodec();
+    OutStream stream = new OutStream("test", 128*1024, codec, receiver);
+    assertEquals(0L, stream.getBufferSize());
+    stream.write(new byte[]{0, 1, 2});
+    stream.flush();
+    Mockito.verify(receiver).output(Mockito.any(ByteBuffer.class));
+    assertEquals(0L, stream.getBufferSize());
+  }
+}


[39/51] [abbrv] hive git commit: HIVE-9457 : Fix obsolete parameter name in HiveConf description of hive.hashtable.initialCapacity (Shannon Ladymon via Lefty Leverenz)

Posted by jd...@apache.org.
HIVE-9457 : Fix obsolete parameter name in HiveConf description of hive.hashtable.initialCapacity (Shannon Ladymon via Lefty Leverenz)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4cd1101b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4cd1101b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4cd1101b

Branch: refs/heads/llap
Commit: 4cd1101b8a93aaeec5d559b65bcf6252d47535e6
Parents: f07fdfb
Author: LeftyLev <le...@hortonworks.com>
Authored: Mon Mar 14 17:08:32 2016 -0700
Committer: LeftyLev <le...@hortonworks.com>
Committed: Mon Mar 14 17:13:42 2016 -0700

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4cd1101b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 27a56dd..c992433 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1298,7 +1298,7 @@ public class HiveConf extends Configuration {
         " of the number of keys is divided by this value. If the value is 0, statistics are not used" +
         "and hive.hashtable.initialCapacity is used instead."),
     HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " +
-        "mapjoin hashtable if statistics are absent, or if hive.hashtable.stats.key.estimate.adjustment is set to 0"),
+        "mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"),
     HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
     HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
         "This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" +


[45/51] [abbrv] hive git commit: HIVE-12995 : LLAP: Synthetic file ids need collision checks (Sergey Shelukhin, reviewed by Gopal V)

Posted by jd...@apache.org.
HIVE-12995 : LLAP: Synthetic file ids need collision checks (Sergey Shelukhin, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/26b5c7b5
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/26b5c7b5
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/26b5c7b5

Branch: refs/heads/llap
Commit: 26b5c7b56a4f28ce3eabc0207566cce46b29b558
Parents: 4458b1a
Author: Sergey Shelukhin <se...@apache.org>
Authored: Tue Mar 15 13:49:35 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Tue Mar 15 13:50:14 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   2 +
 .../llap/IncrementalObjectSizeEstimator.java    |   7 +-
 .../apache/hadoop/hive/llap/cache/Cache.java    |  27 ---
 .../hadoop/hive/llap/cache/LowLevelCache.java   |   4 +-
 .../hive/llap/cache/LowLevelCacheImpl.java      |  28 +--
 .../hadoop/hive/llap/cache/NoopCache.java       |  33 ----
 .../hive/llap/io/api/impl/LlapIoImpl.java       |   6 +-
 .../llap/io/decode/EncodedDataConsumer.java     |  77 +-------
 .../llap/io/decode/OrcColumnVectorProducer.java |   8 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   2 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   | 183 ++++---------------
 .../hive/llap/io/metadata/OrcFileMetadata.java  |  33 ++--
 .../hive/llap/io/metadata/OrcMetadataCache.java |  12 +-
 .../llap/io/metadata/OrcStripeMetadata.java     |  19 +-
 orc/src/java/org/apache/orc/FileMetadata.java   |   2 +-
 .../org/apache/hadoop/hive/ql/io/HdfsUtils.java |  18 +-
 .../hadoop/hive/ql/io/SyntheticFileId.java      | 100 ++++++++++
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |  97 ++++++----
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |  46 +++--
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |  32 ++--
 .../hive/ql/io/orc/encoded/OrcBatchKey.java     |  20 +-
 .../hive/ql/io/orc/encoded/OrcCacheKey.java     |  58 ------
 .../hadoop/hive/ql/io/orc/encoded/Reader.java   |  10 +-
 .../hive/ql/io/orc/encoded/ReaderImpl.java      |   4 +-
 .../hive/ql/io/orc/encoded/StreamUtils.java     |   1 -
 .../hive/ql/io/orc/TestInputOutputFormat.java   |  18 +-
 .../apache/hadoop/hive/common/io/DataCache.java |   4 +-
 .../common/io/encoded/EncodedColumnBatch.java   |   9 +-
 28 files changed, 335 insertions(+), 525 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c992433..9fd6648 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1223,6 +1223,8 @@ public class HiveConf extends Configuration {
         "metastore calls if metastore metadata cache is used."),
     HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
         "Include file ID in splits on file systems thaty support it."),
+    HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
+        "Allow synthetic file ID in splits on file systems that don't have a native one."),
     HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
         "Max cache size for keeping meta info about orc splits cached in the client."),
     HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
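
The new hive.orc.splits.allow.synthetic.fileid flag pairs with the SyntheticFileId class added by this commit (see the file list above): on file systems with no native file ID, a key can be synthesized instead. A sketch of the idea, assuming the key combines path, length, and modification time; the real SyntheticFileId may differ. Keeping the key as an object (note the long-to-Object signature changes below) lets equals() compare every field, which is the collision check the summary refers to:

    // Illustrative synthetic file key; not the actual SyntheticFileId class.
    final class SyntheticKey {
      private final long pathHash, modTime, length;

      SyntheticKey(String path, long modTime, long length) {
        this.pathHash = path.hashCode();   // a stronger hash would be preferable
        this.modTime = modTime;
        this.length = length;
      }

      @Override public int hashCode() {
        long h = (pathHash * 31 + modTime) * 31 + length;
        return (int) (h ^ (h >>> 32));
      }

      @Override public boolean equals(Object o) {
        if (!(o instanceof SyntheticKey)) return false;
        SyntheticKey k = (SyntheticKey) o;
        // Comparing all fields is what catches hash collisions.
        return pathHash == k.pathHash && modTime == k.modTime && length == k.length;
      }
    }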

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
index d33f724..7d68294 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/IncrementalObjectSizeEstimator.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.llap;
 
+import com.google.common.collect.Lists;
+import com.google.protobuf.UnknownFieldSet;
 import java.lang.reflect.AccessibleObject;
 import java.lang.reflect.Array;
 import java.lang.reflect.Field;
@@ -35,13 +37,11 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator.ObjectEstimator;
 import org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.ql.util.JavaDataModel;
 
-import com.google.common.collect.Lists;
-import com.google.protobuf.UnknownFieldSet;
-
 /**
  * Creates size estimators for java objects. The estimators attempt to do most of the reflection
  * work at initialization time, and also take some shortcuts, to minimize the amount of work done
@@ -622,6 +622,7 @@ public class IncrementalObjectSizeEstimator {
     } catch (ClassNotFoundException e) {
       // Ignore and hope for the best.
       LlapIoImpl.LOG.warn("Cannot find " + className);
+      return;
     }
     IncrementalObjectSizeEstimator.createEstimators(clazz, sizeEstimators);
   }
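
The added return is the substance of this hunk: previously the method presumably fell through after logging the ClassNotFoundException, letting a null clazz reach createEstimators and fail there instead of skipping the missing class gracefully. A minimal sketch of the corrected control flow, with illustrative names:

    final class EstimatorRegistration {
      static void registerByName(String className) {
        Class<?> clazz;
        try {
          clazz = Class.forName(className);
        } catch (ClassNotFoundException e) {
          // "Ignore and hope for the best", as the original comment says.
          System.err.println("Cannot find " + className);
          return;   // without this, a null clazz would flow onward
        }
        System.out.println("would create estimators for " + clazz.getName());
      }
    }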

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/cache/Cache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/Cache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/Cache.java
deleted file mode 100644
index cee23a9..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/Cache.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.cache;
-
-import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
-
-/** Dummy interface for now, might be different. */
-public interface Cache<CacheKey> {
-  public ColumnStreamData[] cacheOrGet(CacheKey key, ColumnStreamData[] value);
-  public ColumnStreamData[] get(CacheKey key);
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
index 17d9fdf..1b61a6e 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCache.java
@@ -49,7 +49,7 @@ public interface LowLevelCache {
    *    Some sort of InvalidCacheChunk could be placed to avoid them. TODO
    * @param base base offset for the ranges (stripe/stream offset in case of ORC).
    */
-  DiskRangeList getFileData(long fileId, DiskRangeList range, long baseOffset,
+  DiskRangeList getFileData(Object fileKey, DiskRangeList range, long baseOffset,
       DiskRangeListFactory factory, LowLevelCacheCounters qfCounters, BooleanRef gotAllData);
 
   /**
@@ -57,6 +57,6 @@ public interface LowLevelCache {
    * @return null if all data was put; bitmask indicating which chunks were not put otherwise;
    *         the replacement chunks from cache are updated directly in the array.
    */
-  long[] putFileData(long fileId, DiskRange[] ranges, MemoryBuffer[] chunks,
+  long[] putFileData(Object fileKey, DiskRange[] ranges, MemoryBuffer[] chunks,
       long baseOffset, Priority priority, LowLevelCacheCounters qfCounters);
 }
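
The putFileData contract above returns null when every chunk was cached, and otherwise a bitmask marking the chunks that were not put. One plausible way a caller could decode that result, assuming a conventional 64-bits-per-long layout over chunk indices (the exact encoding is not shown in this hunk):

    final class PutResult {
      static void reportNotCached(long[] notPutMask, int chunkCount) {
        if (notPutMask == null) {
          System.out.println("all " + chunkCount + " chunks were cached");
          return;
        }
        for (int i = 0; i < chunkCount; i++) {
          if ((notPutMask[i >>> 6] & (1L << (i & 63))) != 0) {
            System.out.println("chunk " + i + " was not cached");
          }
        }
      }
    }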

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
index 1132171..a60fed3 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/LowLevelCacheImpl.java
@@ -43,8 +43,8 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
   private final EvictionAwareAllocator allocator;
   private final AtomicInteger newEvictions = new AtomicInteger(0);
   private Thread cleanupThread = null;
-  private final ConcurrentHashMap<Long, FileCache> cache =
-      new ConcurrentHashMap<Long, FileCache>();
+  private final ConcurrentHashMap<Object, FileCache> cache =
+      new ConcurrentHashMap<Object, FileCache>();
   private final LowLevelCachePolicy cachePolicy;
   private final long cleanupInterval;
   private final LlapDaemonCacheMetrics metrics;
@@ -75,11 +75,11 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
   }
 
   @Override
-  public DiskRangeList getFileData(long fileId, DiskRangeList ranges, long baseOffset,
+  public DiskRangeList getFileData(Object fileKey, DiskRangeList ranges, long baseOffset,
       DiskRangeListFactory factory, LowLevelCacheCounters qfCounters, BooleanRef gotAllData) {
     if (ranges == null) return null;
     DiskRangeList prev = ranges.prev;
-    FileCache subCache = cache.get(fileId);
+    FileCache subCache = cache.get(fileKey);
     if (subCache == null || !subCache.incRef()) {
       long totalMissed = ranges.getTotalLength();
       metrics.incrCacheRequestedBytes(totalMissed);
@@ -232,11 +232,11 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
   }
 
   @Override
-  public long[] putFileData(long fileId, DiskRange[] ranges, MemoryBuffer[] buffers,
+  public long[] putFileData(Object fileKey, DiskRange[] ranges, MemoryBuffer[] buffers,
       long baseOffset, Priority priority, LowLevelCacheCounters qfCounters) {
     long[] result = null;
     assert buffers.length == ranges.length;
-    FileCache subCache = getOrAddFileSubCache(fileId);
+    FileCache subCache = getOrAddFileSubCache(fileKey);
     try {
       for (int i = 0; i < ranges.length; ++i) {
         LlapDataBuffer buffer = (LlapDataBuffer)buffers[i];
@@ -260,7 +260,7 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
           }
           if (DebugUtils.isTraceCachingEnabled()) {
             LlapIoImpl.LOG.info("Trying to cache when the chunk is already cached for "
-                + fileId + "@" + offset  + " (base " + baseOffset + "); old " + oldVal
+                + fileKey + "@" + offset  + " (base " + baseOffset + "); old " + oldVal
                 + ", new " + buffer);
           }
           if (DebugUtils.isTraceLockingEnabled()) {
@@ -301,10 +301,10 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
    * All this mess is necessary because we want to be able to remove sub-caches for fully
    * evicted files. It may actually be better to have non-nested map with object keys?
    */
-  private FileCache getOrAddFileSubCache(long fileId) {
+  private FileCache getOrAddFileSubCache(Object fileKey) {
     FileCache newSubCache = null;
     while (true) { // Overwhelmingly executes once.
-      FileCache subCache = cache.get(fileId);
+      FileCache subCache = cache.get(fileKey);
       if (subCache != null) {
         if (subCache.incRef()) return subCache; // Main path - found it, incRef-ed it.
         if (newSubCache == null) {
@@ -312,7 +312,7 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
           newSubCache.incRef();
         }
         // Found a stale value we cannot incRef; try to replace it with new value.
-        if (cache.replace(fileId, subCache, newSubCache)) return newSubCache;
+        if (cache.replace(fileKey, subCache, newSubCache)) return newSubCache;
         continue; // Someone else replaced/removed a stale value, try again.
       }
       // No value found.
@@ -320,11 +320,11 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
         newSubCache = new FileCache();
         newSubCache.incRef();
       }
-      FileCache oldSubCache = cache.putIfAbsent(fileId, newSubCache);
+      FileCache oldSubCache = cache.putIfAbsent(fileKey, newSubCache);
       if (oldSubCache == null) return newSubCache; // Main path 2 - created a new file cache.
       if (oldSubCache.incRef()) return oldSubCache; // Someone created one in parallel.
       // Someone created one in parallel and then it went stale.
-      if (cache.replace(fileId, oldSubCache, newSubCache)) return newSubCache;
+      if (cache.replace(fileKey, oldSubCache, newSubCache)) return newSubCache;
       // Someone else replaced/removed a parallel-added stale value, try again. Max confusion.
     }
   }
@@ -463,7 +463,7 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
       // Iterate thru all the filecaches. This is best-effort.
       // If these super-long-lived iterators affect the map in some bad way,
       // we'd need to sleep once per round instead.
-      Iterator<Map.Entry<Long, FileCache>> iter = cache.entrySet().iterator();
+      Iterator<Map.Entry<Object, FileCache>> iter = cache.entrySet().iterator();
       boolean isPastEndTime = false;
       while (iter.hasNext()) {
         FileCache fc = iter.next().getValue();
@@ -516,7 +516,7 @@ public class LowLevelCacheImpl implements LowLevelCache, BufferUsageManager, Lla
   @Override
   public String debugDumpForOom() {
     StringBuilder sb = new StringBuilder("File cache state ");
-    for (Map.Entry<Long, FileCache> e : cache.entrySet()) {
+    for (Map.Entry<Object, FileCache> e : cache.entrySet()) {
       if (!e.getValue().incRef()) continue;
       try {
         sb.append("\n  file " + e.getKey());
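
getOrAddFileSubCache above is a lock-free get-or-create: look up a live sub-cache, otherwise create one and race to publish it via putIfAbsent/replace, retrying whenever a stale (fully released) entry is encountered. A compact sketch of the same pattern; the refcount rule is simplified relative to Hive's FileCache:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    final class FileCaches {
      static final class Entry {
        private final AtomicInteger refCount = new AtomicInteger(1);
        boolean incRef() {                 // fails once the entry is fully released
          int c;
          do {
            c = refCount.get();
            if (c == 0) return false;      // stale: cannot be revived
          } while (!refCount.compareAndSet(c, c + 1));
          return true;
        }
      }

      private final ConcurrentHashMap<Object, Entry> cache = new ConcurrentHashMap<>();

      Entry getOrAdd(Object fileKey) {
        Entry fresh = null;
        while (true) {                             // overwhelmingly executes once
          Entry existing = cache.get(fileKey);
          if (existing != null) {
            if (existing.incRef()) return existing;       // live entry: use it
            if (fresh == null) fresh = new Entry();
            if (cache.replace(fileKey, existing, fresh)) return fresh;
            continue;                              // lost a race, try again
          }
          if (fresh == null) fresh = new Entry();
          Entry raced = cache.putIfAbsent(fileKey, fresh);
          if (raced == null) return fresh;         // we published first
          if (raced.incRef()) return raced;        // someone else published a live one
          if (cache.replace(fileKey, raced, fresh)) return fresh;
        }
      }
    }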

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/cache/NoopCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/NoopCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cache/NoopCache.java
deleted file mode 100644
index d0461e8..0000000
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cache/NoopCache.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.llap.cache;
-
-import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
-
-public class NoopCache<CacheKey> implements Cache<CacheKey> {
-  @Override
-  public ColumnStreamData[] cacheOrGet(CacheKey key, ColumnStreamData[] value) {
-    return value;
-  }
-
-  @Override
-  public ColumnStreamData[] get(CacheKey key) {
-    return null;  // TODO: ensure real implementation increases refcount
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
index d2c1907..dbee823 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapIoImpl.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.common.io.Allocator;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.cache.BuddyAllocator;
 import org.apache.hadoop.hive.llap.cache.BufferUsageManager;
-import org.apache.hadoop.hive.llap.cache.Cache;
 import org.apache.hadoop.hive.llap.cache.EvictionAwareAllocator;
 import org.apache.hadoop.hive.llap.cache.EvictionDispatcher;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheImpl;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
 import org.apache.hadoop.hive.llap.metrics.MetricsUtils;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
-import org.apache.hadoop.hive.ql.io.orc.encoded.OrcCacheKey;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.metrics2.util.MBeans;
@@ -91,8 +89,6 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
     LOG.info("Started llap daemon metrics with displayName: " + displayName +
         " sessionId: " + sessionId);
 
-    Cache<OrcCacheKey> cache = null; // High-level cache is not implemented or supported.
-
     OrcMetadataCache metadataCache = null;
     LowLevelCacheImpl orcCache = null;
     BufferUsageManager bufferManager = null;
@@ -131,7 +127,7 @@ public class LlapIoImpl implements LlapIo<VectorizedRowBatch> {
 
     // TODO: this should depend on input format and be in a map, or something.
     this.cvp = new OrcColumnVectorProducer(
-        metadataCache, orcCache, bufferManager, cache, conf, cacheMetrics, queueMetrics);
+        metadataCache, orcCache, bufferManager, conf, cacheMetrics, queueMetrics);
     if (LOGL.isInfoEnabled()) {
       LOG.info("LLAP IO initialized");
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
index b81e97d..137acb0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/EncodedDataConsumer.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hive.llap.io.decode;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hive.common.Pool;
@@ -30,14 +27,9 @@ import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 import org.apache.hive.common.util.FixedSizedObjectPool;
 
-/**
- *
- */
 public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedColumnBatch<BatchKey>>
   implements Consumer<BatchType>, ReadPipeline {
   private volatile boolean isStopped = false;
-  // TODO: use array, precreate array based on metadata first? Works for ORC. For now keep dumb.
-  private final HashMap<BatchKey, BatchType> pendingData = new HashMap<>();
   private ConsumerFeedback<BatchType> upstreamFeedback;
   private final Consumer<ColumnVectorBatch> downstreamConsumer;
   private Callable<Void> readCallable;
@@ -76,50 +68,15 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
 
   @Override
   public void consumeData(BatchType data) {
-    // TODO: data arrives in whole batches now, not in columns. We could greatly simplify this.
-    BatchType targetBatch = null;
-    boolean localIsStopped = false;
-    Integer targetBatchVersion = null;
-    synchronized (pendingData) {
-      localIsStopped = isStopped;
-      if (!localIsStopped) {
-        targetBatch = pendingData.get(data.getBatchKey());
-        if (targetBatch == null) {
-          targetBatch = data;
-          pendingData.put(data.getBatchKey(), data);
-        }
-        // We have the map locked; the code the throws things away from map only bumps the version
-        // under the same map lock; code the throws things away here only bumps the version when
-        // the batch was taken out of the map.
-        targetBatchVersion = targetBatch.version;
-      }
-      queueMetrics.setQueueSize(pendingData.size());
-    }
-    if (localIsStopped) {
-      returnSourceData(data);
-      return;
-    }
-    assert targetBatchVersion != null;
-    synchronized (targetBatch) {
-      if (targetBatch != data) {
-        throw new UnsupportedOperationException("Merging is not supported");
-      }
-      synchronized (pendingData) {
-        targetBatch = isStopped ? null : pendingData.remove(data.getBatchKey());
-        // Check if someone already threw this away and changed the version.
-        localIsStopped = (targetBatchVersion != targetBatch.version);
-      }
-      // We took the batch out of the map. No more contention with stop possible.
-    }
-    if (localIsStopped && (targetBatch != data)) {
+    if (isStopped) {
       returnSourceData(data);
       return;
     }
     long start = System.currentTimeMillis();
-    decodeBatch(targetBatch, downstreamConsumer);
+    decodeBatch(data, downstreamConsumer);
     long end = System.currentTimeMillis();
     queueMetrics.addProcessingTime(end - start);
-    returnSourceData(targetBatch);
+    returnSourceData(data);
   }
 
   /**
@@ -127,7 +84,6 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
    * of the ECB in question; or, if ECB is still in pendingData, pendingData must be locked.
    */
   private void returnSourceData(BatchType data) {
-    ++data.version;
     upstreamFeedback.returnData(data);
   }
 
@@ -136,19 +92,12 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
 
   @Override
   public void setDone() {
-    synchronized (pendingData) {
-      if (!pendingData.isEmpty()) {
-        throw new AssertionError("Not all data has been sent downstream: " + pendingData.size());
-      }
-    }
     downstreamConsumer.setDone();
   }
 
-
   @Override
   public void setError(Throwable t) {
     downstreamConsumer.setError(t);
-    dicardPendingData(false);
   }
 
   @Override
@@ -156,28 +105,10 @@ public abstract class EncodedDataConsumer<BatchKey, BatchType extends EncodedCol
     cvbPool.offer(data);
   }
 
-  private void dicardPendingData(boolean isStopped) {
-    List<BatchType> batches = new ArrayList<BatchType>(
-        pendingData.size());
-    synchronized (pendingData) {
-      if (isStopped) {
-        this.isStopped = true;
-      }
-      for (BatchType ecb : pendingData.values()) {
-        ++ecb.version;
-        batches.add(ecb);
-      }
-      pendingData.clear();
-    }
-    for (BatchType batch : batches) {
-      upstreamFeedback.returnData(batch);
-    }
-  }
-
   @Override
   public void stop() {
     upstreamFeedback.stop();
-    dicardPendingData(true);
+    this.isStopped = true;
   }
 
   @Override

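With encoded data now arriving as whole batches, consumeData() collapses into a pass-through
guarded by a volatile flag, which is what lets the pendingData map, batch versioning, and
discard logic above be deleted. A condensed sketch of the remaining control flow (generics
and metrics simplified away; not the full EncodedDataConsumer):

  import java.util.function.Consumer;

  class PassThroughConsumer<T> {
    private volatile boolean isStopped = false;
    private final Consumer<T> downstream;       // stands in for decodeBatch(...)
    private final Consumer<T> returnToUpstream; // stands in for upstreamFeedback.returnData(...)

    PassThroughConsumer(Consumer<T> downstream, Consumer<T> returnToUpstream) {
      this.downstream = downstream;
      this.returnToUpstream = returnToUpstream;
    }

    void consumeData(T data) {
      if (isStopped) {               // best-effort: a batch already past this check completes
        returnToUpstream.accept(data);
        return;
      }
      downstream.accept(data);       // decode
      returnToUpstream.accept(data); // always hand the source batch back
    }

    void stop() { isStopped = true; }
  }
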
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
index 18191da..37fc8d0 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcColumnVectorProducer.java
@@ -23,7 +23,6 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.llap.cache.BufferUsageManager;
-import org.apache.hadoop.hive.llap.cache.Cache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheImpl;
 import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
@@ -34,14 +33,12 @@ import org.apache.hadoop.hive.llap.io.metadata.OrcMetadataCache;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonCacheMetrics;
 import org.apache.hadoop.hive.llap.metrics.LlapDaemonQueueMetrics;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
-import org.apache.hadoop.hive.ql.io.orc.encoded.OrcCacheKey;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.mapred.FileSplit;
 
 public class OrcColumnVectorProducer implements ColumnVectorProducer {
 
   private final OrcMetadataCache metadataCache;
-  private final Cache<OrcCacheKey> cache;
   private final LowLevelCache lowLevelCache;
   private final BufferUsageManager bufferManager;
   private final Configuration conf;
@@ -50,7 +47,7 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
   private LlapDaemonQueueMetrics queueMetrics;
 
   public OrcColumnVectorProducer(OrcMetadataCache metadataCache,
-      LowLevelCacheImpl lowLevelCache, BufferUsageManager bufferManager, Cache<OrcCacheKey> cache,
+      LowLevelCacheImpl lowLevelCache, BufferUsageManager bufferManager,
       Configuration conf, LlapDaemonCacheMetrics metrics, LlapDaemonQueueMetrics queueMetrics) {
     if (LlapIoImpl.LOGL.isInfoEnabled()) {
       LlapIoImpl.LOG.info("Initializing ORC column vector producer");
@@ -59,7 +56,6 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
     this.metadataCache = metadataCache;
     this.lowLevelCache = lowLevelCache;
     this.bufferManager = bufferManager;
-    this.cache = cache;
     this.conf = conf;
     this._skipCorrupt = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_SKIP_CORRUPT_DATA);
     this.cacheMetrics = metrics;
@@ -74,7 +70,7 @@ public class OrcColumnVectorProducer implements ColumnVectorProducer {
     cacheMetrics.incrCacheReadRequests();
     OrcEncodedDataConsumer edc = new OrcEncodedDataConsumer(consumer, columnIds.size(),
         _skipCorrupt, counters, queueMetrics);
-    OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache, bufferManager, cache,
+    OrcEncodedDataReader reader = new OrcEncodedDataReader(lowLevelCache, bufferManager,
         metadataCache, conf, split, columnIds, sarg, columnNames, edc, counters);
     edc.init(reader, reader);
     return edc;

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
index 28cae87..7ee263d 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
@@ -54,6 +54,7 @@ public class OrcEncodedDataConsumer
       Consumer<ColumnVectorBatch> consumer, int colCount, boolean skipCorrupt,
       QueryFragmentCounters counters, LlapDaemonQueueMetrics queueMetrics) {
     super(consumer, colCount, queueMetrics);
+    // TODO: get rid of this
     this.skipCorrupt = skipCorrupt;
     this.counters = counters;
   }
@@ -62,7 +63,6 @@ public class OrcEncodedDataConsumer
     assert fileMetadata == null;
     fileMetadata = f;
     stripes = new OrcStripeMetadata[f.getStripes().size()];
-    // TODO: get rid of this
     codec = WriterImpl.createCodec(fileMetadata.getCompressionKind());
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index 8111c6d..eb251a8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.ConsumerFeedback;
 import org.apache.hadoop.hive.llap.DebugUtils;
 import org.apache.hadoop.hive.llap.cache.BufferUsageManager;
-import org.apache.hadoop.hive.llap.cache.Cache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
@@ -71,7 +70,6 @@ import org.apache.hadoop.hive.ql.io.orc.encoded.Consumer;
 import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedOrcFile;
 import org.apache.hadoop.hive.ql.io.orc.encoded.EncodedReader;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
-import org.apache.hadoop.hive.ql.io.orc.encoded.OrcCacheKey;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.OrcEncodedColumnBatch;
 import org.apache.hadoop.hive.ql.io.orc.encoded.Reader.PoolFactory;
 import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
@@ -90,7 +88,7 @@ import org.apache.tez.common.CallableWithNdc;
  * consumer. It also serves as ConsumerFeedback that receives processed EncodedColumnBatch-es.
  */
 public class OrcEncodedDataReader extends CallableWithNdc<Void>
-    implements ConsumerFeedback<OrcEncodedColumnBatch>, Consumer<OrcEncodedColumnBatch> {
+    implements ConsumerFeedback<OrcEncodedColumnBatch> {
   private static final Logger LOG = LoggerFactory.getLogger(OrcEncodedDataReader.class);
   public static final FixedSizedObjectPool<ColumnStreamData> CSD_POOL =
       new FixedSizedObjectPool<>(8192, new PoolObjectHelper<ColumnStreamData>() {
@@ -135,7 +133,6 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private final LowLevelCache lowLevelCache;
   private final BufferUsageManager bufferManager;
   private final Configuration conf;
-  private final Cache<OrcCacheKey> cache;
   private final FileSplit split;
   private List<Integer> columnIds;
   private final SearchArgument sarg;
@@ -150,7 +147,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private Reader orcReader;
   private MetadataReader metadataReader;
   private EncodedReader stripeReader;
-  private Long fileId;
+  private Object fileKey;
   private FileSystem fs;
   /**
    * readState[stripeIx'][colIx'] => boolean array (could be a bitmask) of rg-s that need to be
@@ -162,13 +159,12 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private volatile boolean isPaused = false;
 
   public OrcEncodedDataReader(LowLevelCache lowLevelCache, BufferUsageManager bufferManager,
-      Cache<OrcCacheKey> cache, OrcMetadataCache metadataCache, Configuration conf,
-      FileSplit split, List<Integer> columnIds, SearchArgument sarg, String[] columnNames,
-      OrcEncodedDataConsumer consumer, QueryFragmentCounters counters) {
+      OrcMetadataCache metadataCache, Configuration conf, FileSplit split, List<Integer> columnIds,
+      SearchArgument sarg, String[] columnNames, OrcEncodedDataConsumer consumer,
+      QueryFragmentCounters counters) {
     this.lowLevelCache = lowLevelCache;
     this.metadataCache = metadataCache;
     this.bufferManager = bufferManager;
-    this.cache = cache;
     this.conf = conf;
     this.split = split;
     this.columnIds = columnIds;
@@ -230,10 +226,10 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     // 1. Get file metadata from cache, or create the reader and read it.
     // Don't cache the filesystem object for now; Tez closes it and FS cache will fix all that
     fs = split.getPath().getFileSystem(conf);
-    fileId = determineFileId(fs, split,
+    fileKey = determineFileId(fs, split,
         HiveConf.getBoolVar(conf, ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID));
     counters.setDesc(QueryFragmentCounters.Desc.FILE, split.getPath()
-        + (fileId == null ? "" : " (" + fileId + ")"));
+        + (fileKey == null ? "" : " (" + fileKey + ")"));
 
     try {
       fileMetadata = getOrReadFileMetadata();
@@ -307,27 +303,13 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     //    read for every stripe (null means read all of them - the usual path). In any case,
     //    readState will be modified for column x rgs that were fetched from high-level cache.
     List<Integer>[] stripeColsToRead = null;
-    if (cache != null) {
-      try {
-        stripeColsToRead = produceDataFromCache(stride);
-      } catch (Throwable t) {
-        // produceDataFromCache handles its own cleanup.
-        consumer.setError(t);
-        cleanupReaders();
-        recordReaderTime(startTime);
-        return null;
-      }
-    }
 
     // 5. Create encoded data reader.
-    // In case if we have high-level cache, we will intercept the data and add it there;
-    // otherwise just pass the data directly to the consumer.
-    Consumer<OrcEncodedColumnBatch> dataConsumer = (cache == null) ? this.consumer : this;
     try {
       ensureOrcReader();
       // Reader creating updates HDFS counters, don't do it here.
       DataWrapperForOrc dw = new DataWrapperForOrc();
-      stripeReader = orcReader.encodedReader(fileId, dw, dw, POOL_FACTORY);
+      stripeReader = orcReader.encodedReader(fileKey, dw, dw, POOL_FACTORY);
       stripeReader.setDebugTracing(DebugUtils.isTraceOrcEnabled());
     } catch (Throwable t) {
       consumer.setError(t);
@@ -338,9 +320,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
 
     // 6. Read data.
     // TODO: I/O threadpool could be here - one thread per stripe; for now, linear.
-    boolean hasFileId = this.fileId != null;
-    long fileId = hasFileId ? this.fileId : 0;
-    OrcBatchKey stripeKey = hasFileId ? new OrcBatchKey(fileId, -1, 0) : null;
+    boolean hasFileId = this.fileKey != null;
+    OrcBatchKey stripeKey = hasFileId ? new OrcBatchKey(fileKey, -1, 0) : null;
     for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
       if (processStop()) {
         cleanupReaders();
@@ -369,7 +350,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
         if (colRgs.length > 0 && colRgs[0] == SargApplier.READ_NO_RGS) continue;
 
         // 6.1. Determine the columns to read (usually the same as requested).
-        if (cache == null || cols == null || cols.size() == colRgs.length) {
+        if (cols == null || cols.size() == colRgs.length) {
           cols = columnIds;
           stripeIncludes = globalIncludes;
         } else {
@@ -393,7 +374,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
             counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
             ensureMetadataReader();
             long startTimeHdfs = counters.startTimeCounter();
-            stripeMetadata = new OrcStripeMetadata(new OrcBatchKey(fileId, stripeIx, 0),
+            stripeMetadata = new OrcStripeMetadata(new OrcBatchKey(fileKey, stripeIx, 0),
                 metadataReader, stripe, stripeIncludes, sargColumns);
             counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTimeHdfs);
             if (hasFileId && metadataCache != null) {
@@ -439,7 +420,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
         // data it receives for one stripe. We could probably interrupt it, if it checked that.
         stripeReader.readEncodedColumns(stripeIx, stripe, stripeMetadata.getRowIndexes(),
             stripeMetadata.getEncodings(), stripeMetadata.getStreams(), stripeIncludes,
-            colRgs, dataConsumer);
+            colRgs, consumer);
       } catch (Throwable t) {
         consumer.setError(t);
         cleanupReaders();
@@ -450,7 +431,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
 
     // Done with all the things.
     recordReaderTime(startTime);
-    dataConsumer.setDone();
+    consumer.setDone();
     if (DebugUtils.isTraceMttEnabled()) {
       LlapIoImpl.LOG.info("done processing " + split);
     }
@@ -525,12 +506,12 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     return true;
   }
 
-  private static Long determineFileId(FileSystem fs, FileSplit split,
+  private static Object determineFileId(FileSystem fs, FileSplit split,
       boolean allowSynthetic) throws IOException {
     if (split instanceof OrcSplit) {
-      Long fileId = ((OrcSplit)split).getFileId();
-      if (fileId != null) {
-        return fileId;
+      Object fileKey = ((OrcSplit)split).getFileKey();
+      if (fileKey != null) {
+        return fileKey;
       }
     }
     LOG.warn("Split for " + split.getPath() + " (" + split.getClass() + ") does not have file ID");
@@ -600,8 +581,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private void ensureOrcReader() throws IOException {
     if (orcReader != null) return;
     Path path = split.getPath();
-    if (fileId != null && HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_USE_FILEID_PATH)) {
-      path = HdfsUtils.getFileIdPath(fs, path, fileId);
+    if (fileKey instanceof Long && HiveConf.getBoolVar(conf, ConfVars.LLAP_IO_USE_FILEID_PATH)) {
+      path = HdfsUtils.getFileIdPath(fs, path, (long)fileKey);
     }
     if (DebugUtils.isTraceOrcEnabled()) {
       LOG.info("Creating reader for " + path + " (" + split.getPath() + ")");
@@ -617,8 +598,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
    */
   private OrcFileMetadata getOrReadFileMetadata() throws IOException {
     OrcFileMetadata metadata = null;
-    if (fileId != null && metadataCache != null) {
-      metadata = metadataCache.getFileMetadata(fileId);
+    if (fileKey != null && metadataCache != null) {
+      metadata = metadataCache.getFileMetadata(fileKey);
       if (metadata != null) {
         counters.incrCounter(LlapIOCounters.METADATA_CACHE_HIT);
         return metadata;
@@ -628,8 +609,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     }
     ensureOrcReader();
     // We assume this call doesn't touch HDFS because everything is already read; don't add time.
-    metadata = new OrcFileMetadata(fileId != null ? fileId : 0, orcReader);
-    if (fileId == null || metadataCache == null) return metadata;
+    metadata = new OrcFileMetadata(fileKey, orcReader);
+    if (fileKey == null || metadataCache == null) return metadata;
     return metadataCache.putFileMetadata(metadata);
   }
 
@@ -639,9 +620,8 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   private ArrayList<OrcStripeMetadata> readStripesMetadata(
       boolean[] globalInc, boolean[] sargColumns) throws IOException {
     ArrayList<OrcStripeMetadata> result = new ArrayList<OrcStripeMetadata>(readState.length);
-    boolean hasFileId = this.fileId != null;
-    long fileId = hasFileId ? this.fileId : 0;
-    OrcBatchKey stripeKey = hasFileId ? new OrcBatchKey(fileId, 0, 0) : null;
+    boolean hasFileId = this.fileKey != null;
+    OrcBatchKey stripeKey = hasFileId ? new OrcBatchKey(fileKey, 0, 0) : null;
     for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
       OrcStripeMetadata value = null;
       int stripeIx = stripeIxMod + stripeIxFrom;
@@ -655,7 +635,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
         StripeInformation si = fileMetadata.getStripes().get(stripeIx);
         if (value == null) {
           long startTime = counters.startTimeCounter();
-          value = new OrcStripeMetadata(new OrcBatchKey(fileId, stripeIx, 0),
+          value = new OrcStripeMetadata(new OrcBatchKey(fileKey, stripeIx, 0),
               metadataReader, si, globalInc, sargColumns);
           counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
           if (hasFileId && metadataCache != null) {
@@ -836,105 +816,6 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     readState = new boolean[stripeIxTo - stripeIxFrom][][];
   }
 
-  // TODO: split by stripe? we do everything by stripe, and it might be faster
-  /**
-   * Takes the data from high-level cache for all stripes and returns to consumer.
-   * @return List of columns to read per stripe, if any columns were fully eliminated by cache.
-   */
-  private List<Integer>[] produceDataFromCache(int rowIndexStride) throws IOException {
-    OrcCacheKey key = new OrcCacheKey(fileId, -1, -1, -1);
-    // For each stripe, keep a list of columns that are not fully in cache (null => all of them).
-    @SuppressWarnings("unchecked")
-    List<Integer>[] stripeColsNotInCache = new List[readState.length];
-    for (int stripeIxMod = 0; stripeIxMod < readState.length; ++stripeIxMod) {
-      key.stripeIx = stripeIxFrom + stripeIxMod;
-      boolean[][] cols = readState[stripeIxMod];
-      boolean[] isMissingAnyRgs = new boolean[cols.length];
-      int totalRgCount = getRgCount(fileMetadata.getStripes().get(key.stripeIx), rowIndexStride);
-      for (int rgIx = 0; rgIx < totalRgCount; ++rgIx) {
-        OrcEncodedColumnBatch col = ECB_POOL.take();
-        col.init(fileId, key.stripeIx, rgIx, cols.length);
-        boolean hasAnyCached = false;
-        try {
-          key.rgIx = rgIx;
-          for (int colIxMod = 0; colIxMod < cols.length; ++colIxMod) {
-            boolean[] readMask = cols[colIxMod];
-            // Check if RG is eliminated by SARG
-            if ((readMask == SargApplier.READ_NO_RGS) || (readMask != SargApplier.READ_ALL_RGS
-                && (readMask.length <= rgIx || !readMask[rgIx]))) continue;
-            key.colIx = columnIds.get(colIxMod);
-            ColumnStreamData[] cached = cache.get(key);
-            if (cached == null) {
-              isMissingAnyRgs[colIxMod] = true;
-              continue;
-            }
-            assert cached.length == OrcEncodedColumnBatch.MAX_DATA_STREAMS;
-            col.setAllStreamsData(colIxMod, key.colIx, cached);
-            hasAnyCached = true;
-            if (readMask == SargApplier.READ_ALL_RGS) {
-              // We were going to read all RGs, but some were in cache, allocate the mask.
-              cols[colIxMod] = readMask = new boolean[totalRgCount];
-              Arrays.fill(readMask, true);
-            }
-            readMask[rgIx] = false; // Got from cache, don't read from disk.
-          }
-        } catch (Throwable t) {
-          // TODO: Any cleanup needed to release data in col back to cache should be here.
-          throw (t instanceof IOException) ? (IOException)t : new IOException(t);
-        }
-        if (hasAnyCached) {
-          consumer.consumeData(col);
-        }
-      }
-      boolean makeStripeColList = false; // By default assume we'll fetch all original columns.
-      for (int colIxMod = 0; colIxMod < cols.length; ++colIxMod) {
-        if (isMissingAnyRgs[colIxMod]) {
-          if (makeStripeColList) {
-            stripeColsNotInCache[stripeIxMod].add(columnIds.get(colIxMod));
-          }
-        } else if (!makeStripeColList) {
-          // Some columns were fully in cache. Make a per-stripe col list, add previous columns.
-          makeStripeColList = true;
-          stripeColsNotInCache[stripeIxMod] = new ArrayList<Integer>(cols.length - 1);
-          for (int i = 0; i < colIxMod; ++i) {
-            stripeColsNotInCache[stripeIxMod].add(columnIds.get(i));
-          }
-        }
-      }
-    }
-    return stripeColsNotInCache;
-  }
-
-  @Override
-  public void setDone() {
-    consumer.setDone();
-  }
-
-  @Override
-  public void consumeData(OrcEncodedColumnBatch data) {
-    // Store object in cache; create new key object - cannot be reused.
-    assert cache != null;
-    throw new UnsupportedOperationException("not implemented");
-    /*for (int i = 0; i < data.getColumnData().length; ++i) {
-      OrcCacheKey key = new OrcCacheKey(data.getBatchKey(), data.getColumnIxs()[i]);
-      ColumnStreamData[] toCache = data.getColumnData()[i];
-      ColumnStreamData[] cached = cache.cacheOrGet(key, toCache);
-      if (toCache != cached) {
-        for (ColumnStreamData sb : toCache) {
-          if (sb.decRef() != 0) continue;
-          lowLevelCache.releaseBuffers(sb.getCacheBuffers());
-        }
-        data.getColumnData()[i] = cached;
-      }
-    }
-    consumer.consumeData(data);*/
-  }
-
-  @Override
-  public void setError(Throwable t) {
-    consumer.setError(t);
-  }
-
   private class DataWrapperForOrc implements DataReader, DataCache {
     private final DataReader orcDataReader;
 
@@ -948,17 +829,17 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     }
 
     @Override
-    public DiskRangeList getFileData(long fileId, DiskRangeList range,
+    public DiskRangeList getFileData(Object fileKey, DiskRangeList range,
         long baseOffset, DiskRangeListFactory factory, BooleanRef gotAllData) {
       return (lowLevelCache == null) ? range : lowLevelCache.getFileData(
-          fileId, range, baseOffset, factory, counters, gotAllData);
+          fileKey, range, baseOffset, factory, counters, gotAllData);
     }
 
     @Override
-    public long[] putFileData(long fileId, DiskRange[] ranges,
+    public long[] putFileData(Object fileKey, DiskRange[] ranges,
         MemoryBuffer[] data, long baseOffset) {
       return (lowLevelCache == null) ? null : lowLevelCache.putFileData(
-          fileId, ranges, data, baseOffset, Priority.NORMAL, counters);
+          fileKey, ranges, data, baseOffset, Priority.NORMAL, counters);
     }
 
     @Override
@@ -989,7 +870,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
       DiskRangeList result = orcDataReader.readFileData(range, baseOffset, doForceDirect);
       counters.recordHdfsTime(startTime);
       if (DebugUtils.isTraceOrcEnabled() && LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after disk read (file " + fileId + ", base offset " + baseOffset
+        LOG.info("Disk ranges after disk read (file " + fileKey + ", base offset " + baseOffset
               + "): " + RecordReaderUtils.stringifyDiskRanges(result));
       }
       return result;

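After this change fileKey is an opaque Object: a Long inode ID on HDFS, a SyntheticFileId
elsewhere, or null when no key is available. Code that still needs the raw inode, such as
the fileid-path branch in ensureOrcReader() above, gates on the runtime type. A minimal
sketch of that dispatch (class and method names are hypothetical):

  public class FileKeyDispatch {
    // Hypothetical helper: yield an HDFS inode ID only when the key really is one.
    static Long inodeIdOrNull(Object fileKey) {
      return (fileKey instanceof Long) ? (Long) fileKey : null;
    }

    public static void main(String[] args) {
      System.out.println(inodeIdOrNull(42L));         // 42   - genuine inode ID
      System.out.println(inodeIdOrNull("synthetic")); // null - String standing in for SyntheticFileId
      System.out.println(inodeIdOrNull(null));        // null - no key at all
    }
  }
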
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
index 2e4e0c5..4e42a0f 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcFileMetadata.java
@@ -18,23 +18,22 @@
 
 package org.apache.hadoop.hive.llap.io.metadata;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
-
 import org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator;
 import org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator.ObjectEstimator;
 import org.apache.hadoop.hive.llap.cache.EvictionDispatcher;
 import org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer;
-import org.apache.orc.CompressionKind;
-import org.apache.orc.FileMetadata;
+import org.apache.hadoop.hive.ql.io.SyntheticFileId;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
 import org.apache.hadoop.hive.ql.io.orc.Reader;
 import org.apache.hadoop.hive.ql.io.orc.ReaderImpl.StripeInformationImpl;
-import org.apache.orc.StripeInformation;
+import org.apache.orc.CompressionKind;
+import org.apache.orc.FileMetadata;
 import org.apache.orc.OrcProto;
-
-import com.google.common.annotations.VisibleForTesting;
+import org.apache.orc.StripeInformation;
 
 /** ORC file metadata. Currently contains some duplicate info due to how different parts
  * of ORC use different info. Ideally we would get rid of protobuf structs in code beyond reading,
@@ -46,7 +45,7 @@ public final class OrcFileMetadata extends LlapCacheableBuffer implements FileMe
   private final List<OrcProto.StripeStatistics> stripeStats;
   private final List<OrcProto.Type> types;
   private final List<OrcProto.ColumnStatistics> fileStats;
-  private final long fileId;
+  private final Object fileKey;
   private final CompressionKind compressionKind;
   private final int rowIndexStride;
   private final int compressionBufferSize;
@@ -61,16 +60,18 @@ public final class OrcFileMetadata extends LlapCacheableBuffer implements FileMe
   private final static HashMap<Class<?>, ObjectEstimator> SIZE_ESTIMATORS;
   private final static ObjectEstimator SIZE_ESTIMATOR;
   static {
-    OrcFileMetadata ofm = createDummy(0);
+    OrcFileMetadata ofm = createDummy(new SyntheticFileId());
     SIZE_ESTIMATORS = IncrementalObjectSizeEstimator.createEstimators(ofm);
     IncrementalObjectSizeEstimator.addEstimator(
         "com.google.protobuf.LiteralByteString", SIZE_ESTIMATORS);
+    // Add long for the regular file ID estimation.
+    IncrementalObjectSizeEstimator.createEstimators(Long.class, SIZE_ESTIMATORS);
     SIZE_ESTIMATOR = SIZE_ESTIMATORS.get(OrcFileMetadata.class);
   }
 
   @VisibleForTesting
-  public static OrcFileMetadata createDummy(int fileId) {
-    OrcFileMetadata ofm = new OrcFileMetadata(fileId);
+  public static OrcFileMetadata createDummy(Object fileKey) {
+    OrcFileMetadata ofm = new OrcFileMetadata(fileKey);
     ofm.stripes.add(new StripeInformationImpl(
         OrcProto.StripeInformation.getDefaultInstance()));
     ofm.fileStats.add(OrcProto.ColumnStatistics.getDefaultInstance());
@@ -87,8 +88,8 @@ public final class OrcFileMetadata extends LlapCacheableBuffer implements FileMe
   }
 
   // Ctor for memory estimation and tests
-  private OrcFileMetadata(int fileId) {
-    this.fileId = fileId;
+  private OrcFileMetadata(Object fileKey) {
+    this.fileKey = fileKey;
     stripes = new ArrayList<StripeInformation>();
     versionList = new ArrayList<Integer>();
     fileStats = new ArrayList<>();
@@ -101,8 +102,8 @@ public final class OrcFileMetadata extends LlapCacheableBuffer implements FileMe
     compressionKind = CompressionKind.NONE;
   }
 
-  public OrcFileMetadata(long fileId, Reader reader) {
-    this.fileId = fileId;
+  public OrcFileMetadata(Object fileKey, Reader reader) {
+    this.fileKey = fileKey;
     this.stripeStats = reader.getOrcProtoStripeStatistics();
     this.compressionKind = reader.getCompressionKind();
     this.compressionBufferSize = reader.getCompressionSize();
@@ -183,8 +184,8 @@ public final class OrcFileMetadata extends LlapCacheableBuffer implements FileMe
   }
 
   @Override
-  public long getFileId() {
-    return fileId;
+  public Object getFileKey() {
+    return fileKey;
   }
 
   @Override

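Because the size estimators are seeded from example instances, an Object-typed key needs
both shapes registered: the dummy is now built with a SyntheticFileId, and Long is added
separately so a metadata object keyed by a real inode ID can still be measured. A toy
sketch of that seed-per-runtime-class idea (this registry is invented for illustration; it
is not Hive's IncrementalObjectSizeEstimator):

  import java.util.HashMap;
  import java.util.Map;

  class PerClassSizeRegistry {
    private final Map<Class<?>, Long> sizes = new HashMap<>();

    void register(Class<?> type, long bytes) { sizes.put(type, bytes); }

    long estimate(Object fileKey) {
      Long size = sizes.get(fileKey.getClass());
      if (size == null) { // what registering only one key shape would trigger at runtime
        throw new IllegalStateException("no estimator for " + fileKey.getClass());
      }
      return size;
    }
  }

Registering both Long.class and the synthetic key class up front is the sketch's analogue
of the two createEstimators() calls above.
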
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
index 43c8fb3..e970137 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcMetadataCache.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
 
 public class OrcMetadataCache {
-  private final ConcurrentHashMap<Long, OrcFileMetadata> metadata =
-      new ConcurrentHashMap<Long, OrcFileMetadata>();
+  private final ConcurrentHashMap<Object, OrcFileMetadata> metadata =
+      new ConcurrentHashMap<Object, OrcFileMetadata>();
   private final ConcurrentHashMap<OrcBatchKey, OrcStripeMetadata> stripeMetadata =
       new ConcurrentHashMap<OrcBatchKey, OrcStripeMetadata>();
   private final MemoryManager memoryManager;
@@ -42,7 +42,7 @@ public class OrcMetadataCache {
   public OrcFileMetadata putFileMetadata(OrcFileMetadata metaData) {
     long memUsage = metaData.getMemoryUsage();
     memoryManager.reserveMemory(memUsage, false);
-    OrcFileMetadata val = metadata.putIfAbsent(metaData.getFileId(), metaData);
+    OrcFileMetadata val = metadata.putIfAbsent(metaData.getFileKey(), metaData);
     // See OrcFileMetadata; it is always unlocked, so we just "touch" it here to simulate use.
     if (val == null) {
       val = metaData;
@@ -75,12 +75,12 @@ public class OrcMetadataCache {
     return stripeMetadata.get(stripeKey);
   }
 
-  public OrcFileMetadata getFileMetadata(long fileId) throws IOException {
-    return metadata.get(fileId);
+  public OrcFileMetadata getFileMetadata(Object fileKey) throws IOException {
+    return metadata.get(fileKey);
   }
 
   public void notifyEvicted(OrcFileMetadata buffer) {
-    metadata.remove(buffer.getFileId());
+    metadata.remove(buffer.getFileKey());
     // See OrcFileMetadata - we don't clear the object, it will be GCed when released by users.
   }
 

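putFileMetadata() above follows the putIfAbsent idiom: the first writer's OrcFileMetadata
becomes the canonical entry and a losing writer receives the winner's instance back, so all
threads share one copy. A generic sketch of the idiom (the memory reservation done by the
real method is omitted):

  import java.util.concurrent.ConcurrentHashMap;

  class FirstWriterWins {
    static <K, V> V cacheOrGet(ConcurrentHashMap<K, V> map, K key, V fresh) {
      V existing = map.putIfAbsent(key, fresh);
      return existing == null ? fresh : existing; // loser drops its copy, reuses the winner's
    }
  }
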
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcStripeMetadata.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcStripeMetadata.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcStripeMetadata.java
index 8479d22..82187bd 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcStripeMetadata.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/metadata/OrcStripeMetadata.java
@@ -17,22 +17,21 @@
  */
 package org.apache.hadoop.hive.llap.io.metadata;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
-
 import org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator;
 import org.apache.hadoop.hive.llap.IncrementalObjectSizeEstimator.ObjectEstimator;
 import org.apache.hadoop.hive.llap.cache.EvictionDispatcher;
 import org.apache.hadoop.hive.llap.cache.LlapCacheableBuffer;
-import org.apache.orc.impl.MetadataReader;
-import org.apache.orc.impl.OrcIndex;
-import org.apache.orc.StripeInformation;
+import org.apache.hadoop.hive.ql.io.SyntheticFileId;
 import org.apache.hadoop.hive.ql.io.orc.encoded.OrcBatchKey;
 import org.apache.orc.OrcProto;
-
-import com.google.common.annotations.VisibleForTesting;
+import org.apache.orc.StripeInformation;
+import org.apache.orc.impl.MetadataReader;
+import org.apache.orc.impl.OrcIndex;
 
 public class OrcStripeMetadata extends LlapCacheableBuffer {
   private final OrcBatchKey stripeKey;
@@ -46,10 +45,12 @@ public class OrcStripeMetadata extends LlapCacheableBuffer {
   private final static HashMap<Class<?>, ObjectEstimator> SIZE_ESTIMATORS;
   private final static ObjectEstimator SIZE_ESTIMATOR;
   static {
-    OrcStripeMetadata osm = createDummy(0);
+    OrcStripeMetadata osm = createDummy(new SyntheticFileId());
     SIZE_ESTIMATORS = IncrementalObjectSizeEstimator.createEstimators(osm);
     IncrementalObjectSizeEstimator.addEstimator(
         "com.google.protobuf.LiteralByteString", SIZE_ESTIMATORS);
+    // Add long for the regular file ID estimation.
+    IncrementalObjectSizeEstimator.createEstimators(Long.class, SIZE_ESTIMATORS);
     SIZE_ESTIMATOR = SIZE_ESTIMATORS.get(OrcStripeMetadata.class);
   }
 
@@ -65,7 +66,7 @@ public class OrcStripeMetadata extends LlapCacheableBuffer {
     estimatedMemUsage = SIZE_ESTIMATOR.estimate(this, SIZE_ESTIMATORS);
   }
 
-  private OrcStripeMetadata(long id) {
+  private OrcStripeMetadata(Object id) {
     stripeKey = new OrcBatchKey(id, 0, 0);
     encodings = new ArrayList<>();
     streams = new ArrayList<>();
@@ -73,7 +74,7 @@ public class OrcStripeMetadata extends LlapCacheableBuffer {
   }
 
   @VisibleForTesting
-  public static OrcStripeMetadata createDummy(long id) {
+  public static OrcStripeMetadata createDummy(Object id) {
     OrcStripeMetadata dummy = new OrcStripeMetadata(id);
     dummy.encodings.add(OrcProto.ColumnEncoding.getDefaultInstance());
     dummy.streams.add(OrcProto.Stream.getDefaultInstance());

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/orc/src/java/org/apache/orc/FileMetadata.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/FileMetadata.java b/orc/src/java/org/apache/orc/FileMetadata.java
index d63bdcc..807e696 100644
--- a/orc/src/java/org/apache/orc/FileMetadata.java
+++ b/orc/src/java/org/apache/orc/FileMetadata.java
@@ -44,7 +44,7 @@ public interface FileMetadata {
 
   int getFlattenedColumnCount();
 
-  long getFileId();
+  Object getFileKey();
 
   List<Integer> getVersionList();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
index af64fc8..1a40847 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
@@ -33,30 +33,18 @@ public class HdfsUtils {
   private static final HadoopShims SHIMS = ShimLoader.getHadoopShims();
   private static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
 
-  public static Long getFileId(
+  public static Object getFileId(
       FileSystem fileSystem, Path path, boolean allowSynthetic) throws IOException {
-    String pathStr = path.toUri().getPath();
     if (fileSystem instanceof DistributedFileSystem) {
-      return SHIMS.getFileId(fileSystem, pathStr);
+      return SHIMS.getFileId(fileSystem, path.toUri().getPath());
     }
     if (!allowSynthetic) {
       LOG.warn("Cannot get unique file ID from "
         + fileSystem.getClass().getSimpleName() + "; returning null");
       return null;
     }
-    // If we are not on DFS, we just hash the file name + size and hope for the best.
-    // TODO: we assume it only happens in tests. Fix?
-    int nameHash = pathStr.hashCode();
     FileStatus fs = fileSystem.getFileStatus(path);
-    long fileSize = fs.getLen(), modTime = fs.getModificationTime();
-    int fileSizeHash = (int)(fileSize ^ (fileSize >>> 32)),
-        modTimeHash = (int)(modTime ^ (modTime >>> 32)),
-        combinedHash = modTimeHash ^ fileSizeHash;
-    long id = (((long)nameHash & 0xffffffffL) << 32) | ((long)combinedHash & 0xffffffffL);
-    LOG.warn("Cannot get unique file ID from "
-        + fileSystem.getClass().getSimpleName() + "; using " + id + " (" + pathStr
-        + "," + nameHash + "," + fileSize + ")");
-    return id;
+    return new SyntheticFileId(path, fs.getLen(), fs.getModificationTime());
   }
 
   // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this

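A caller of the reworked HdfsUtils.getFileId() can now see three shapes of result. A small
illustrative wrapper (the helper class is hypothetical; the getFileId() call is the patched
API as shown above):

  import java.io.IOException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.ql.io.HdfsUtils;

  class FileKeyProbe {
    static String describeKey(FileSystem fs, Path path, boolean allowSynthetic)
        throws IOException {
      Object key = HdfsUtils.getFileId(fs, path, allowSynthetic);
      if (key == null) return "no key: non-DFS filesystem and synthetic IDs disallowed";
      if (key instanceof Long) return "HDFS inode ID " + key;
      return "synthetic ID " + key + " (path + length + mtime fingerprint)";
    }
  }
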
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/SyntheticFileId.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/SyntheticFileId.java b/ql/src/java/org/apache/hadoop/hive/ql/io/SyntheticFileId.java
new file mode 100644
index 0000000..905bbb9
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/SyntheticFileId.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+
+public final class SyntheticFileId implements Writable {
+  private long pathHash;
+  private long modTime;
+  private long length;
+
+  /** Writable ctor. */
+  public SyntheticFileId() {
+  }
+
+  public SyntheticFileId(Path path, long len, long modificationTime) {
+    this.pathHash = hashCode(path.toUri().getPath());
+    this.modTime = modificationTime;
+    this.length = len;
+  }
+
+  public SyntheticFileId(FileStatus file) {
+    this(file.getPath(), file.getLen(), file.getModificationTime());
+  }
+
+  @Override
+  public String toString() {
+    return "[" + pathHash + ", " + modTime + ", " + length + "]";
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = prime + (int) (length ^ (length >>> 32));
+    result = prime * result + (int) (modTime ^ (modTime >>> 32));
+    return prime * result + (int) (pathHash ^ (pathHash >>> 32));
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (!(obj instanceof SyntheticFileId)) return false;
+    SyntheticFileId other = (SyntheticFileId)obj;
+    return length == other.length && modTime == other.modTime && pathHash == other.pathHash;
+  }
+
+  private long hashCode(String path) {
+    long h = 0;
+    for (int i = 0; i < path.length(); ++i) {
+      h = 1223 * h + path.charAt(i);
+    }
+    return h;
+  }
+
+  /** The length prefix allows for some backward compatibility w.r.t. field addition. */
+  private static final short THREE_LONGS = 24;
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeShort(THREE_LONGS);
+    out.writeLong(pathHash);
+    out.writeLong(modTime);
+    out.writeLong(length);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    short len = in.readShort();
+    if (len < THREE_LONGS) throw new IOException("Need at least " + THREE_LONGS + " bytes");
+    pathHash = in.readLong();
+    modTime = in.readLong();
+    length = in.readLong();
+    int extraBytes = len - THREE_LONGS;
+    if (extraBytes > 0) {
+      in.skipBytes(extraBytes);
+    }
+  }
+}
\ No newline at end of file

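The short length prefix written first is what lets a newer writer append fields that an
older reader will skip in readFields(). A self-contained round-trip check of the
serialization, assuming the class exactly as added above (the path and timestamps are
sample values):

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.ql.io.SyntheticFileId;

  public class SyntheticFileIdRoundTrip {
    public static void main(String[] args) throws IOException {
      SyntheticFileId original =
          new SyntheticFileId(new Path("/warehouse/t1/bucket_0"), 1024L, 1458259200000L);

      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      original.write(new DataOutputStream(bos)); // writes THREE_LONGS, then the three longs

      SyntheticFileId copy = new SyntheticFileId(); // Writable no-arg ctor
      copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

      // Value equality is exactly what the Object-keyed caches above depend on.
      System.out.println(original.equals(copy) && original.hashCode() == copy.hashCode());
    }
  }
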
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index 0ebcd2a..cd2a668 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -84,6 +84,7 @@ import org.apache.hadoop.hive.ql.io.LlapWrappableInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+import org.apache.hadoop.hive.ql.io.SyntheticFileId;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
@@ -727,10 +728,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     boolean[] covered;
     private List<Future<List<OrcSplit>>> splitFuturesRef;
     private final UserGroupInformation ugi;
+    private final boolean allowSyntheticFileIds;
 
     public ETLSplitStrategy(Context context, FileSystem fs, Path dir,
         List<HdfsFileStatusWithId> children, boolean isOriginal, List<DeltaMetaData> deltas,
-        boolean[] covered, UserGroupInformation ugi) {
+        boolean[] covered, UserGroupInformation ugi, boolean allowSyntheticFileIds) {
       assert !children.isEmpty();
       this.context = context;
       this.dirs = Lists.newArrayList(new ETLDir(dir, fs, children.size()));
@@ -739,6 +741,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       this.deltas = deltas;
       this.covered = covered;
       this.ugi = ugi;
+      this.allowSyntheticFileIds = allowSyntheticFileIds;
     }
 
     @Override
@@ -860,7 +863,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       List<Future<List<OrcSplit>>> localList = new ArrayList<>(splits.size());
       UserGroupInformation tpUgi = ugi == null ? UserGroupInformation.getCurrentUser() : ugi;
       for (SplitInfo splitInfo : splits) {
-        localList.add(Context.threadPool.submit(new SplitGenerator(splitInfo, tpUgi)));
+        localList.add(Context.threadPool.submit(
+            new SplitGenerator(splitInfo, tpUgi, allowSyntheticFileIds)));
       }
       synchronized (splitFutures) {
         splitFutures.addAll(localList);
@@ -873,16 +877,17 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
    * as opposed to query execution (split generation does not read or cache file footers).
    */
   static final class BISplitStrategy extends ACIDSplitStrategy {
-    List<HdfsFileStatusWithId> fileStatuses;
-    boolean isOriginal;
-    List<DeltaMetaData> deltas;
-    FileSystem fs;
-    Context context;
-    Path dir;
+    private final List<HdfsFileStatusWithId> fileStatuses;
+    private final boolean isOriginal;
+    private final List<DeltaMetaData> deltas;
+    private final FileSystem fs;
+    private final Context context;
+    private final Path dir;
+    private final boolean allowSyntheticFileIds;
 
     public BISplitStrategy(Context context, FileSystem fs,
         Path dir, List<HdfsFileStatusWithId> fileStatuses, boolean isOriginal,
-        List<DeltaMetaData> deltas, boolean[] covered) {
+        List<DeltaMetaData> deltas, boolean[] covered, boolean allowSyntheticFileIds) {
       super(dir, context.numBuckets, deltas, covered);
       this.context = context;
       this.fileStatuses = fileStatuses;
@@ -890,6 +895,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       this.deltas = deltas;
       this.fs = fs;
       this.dir = dir;
+      this.allowSyntheticFileIds = allowSyntheticFileIds;
     }
 
     @Override
@@ -900,7 +906,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         if (fileStatus.getLen() != 0) {
           String[] hosts = SHIMS.getLocationsWithOffset(fs, fileStatus).firstEntry().getValue()
               .getHosts();
-          OrcSplit orcSplit = new OrcSplit(fileStatus.getPath(), file.getFileId(), 0,
+          Object fileKey = file.getFileId();
+          if (fileKey == null && allowSyntheticFileIds) {
+            fileKey = new SyntheticFileId(fileStatus);
+          }
+          OrcSplit orcSplit = new OrcSplit(fileStatus.getPath(), fileKey, 0,
               fileStatus.getLen(), hosts, null, isOriginal, true, deltas, -1);
           splits.add(orcSplit);
         }
@@ -1029,8 +1039,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   static final class SplitGenerator implements Callable<List<OrcSplit>> {
     private final Context context;
     private final FileSystem fs;
-    private final HdfsFileStatusWithId fileWithId;
     private final FileStatus file;
+    private final Long fsFileId;
     private final long blockSize;
     private final TreeMap<Long, BlockLocation> locations;
     private final FileInfo fileInfo;
@@ -1046,31 +1056,34 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     private long projColsUncompressedSize;
     private final List<OrcSplit> deltaSplits;
     private final UserGroupInformation ugi;
+    private final boolean allowSyntheticFileIds;
 
-    public SplitGenerator(SplitInfo splitInfo, UserGroupInformation ugi) throws IOException {
+    public SplitGenerator(SplitInfo splitInfo, UserGroupInformation ugi,
+        boolean allowSyntheticFileIds) throws IOException {
       this.ugi = ugi;
       this.context = splitInfo.context;
       this.fs = splitInfo.fs;
-      this.fileWithId = splitInfo.fileWithId;
-      this.file = this.fileWithId.getFileStatus();
+      this.file = splitInfo.fileWithId.getFileStatus();
+      this.fsFileId = splitInfo.fileWithId.getFileId();
       this.blockSize = this.file.getBlockSize();
       this.fileInfo = splitInfo.fileInfo;
       // TODO: potential DFS call
-      this.locations = SHIMS.getLocationsWithOffset(fs, fileWithId.getFileStatus());
+      this.locations = SHIMS.getLocationsWithOffset(fs, file);
       this.isOriginal = splitInfo.isOriginal;
       this.deltas = splitInfo.deltas;
       this.hasBase = splitInfo.hasBase;
       this.projColsUncompressedSize = -1;
       this.deltaSplits = splitInfo.getSplits();
+      this.allowSyntheticFileIds = allowSyntheticFileIds;
     }
 
     Path getPath() {
-      return fileWithId.getFileStatus().getPath();
+      return file.getPath();
     }
 
     @Override
     public String toString() {
-      return "splitter(" + fileWithId.getFileStatus().getPath() + ")";
+      return "splitter(" + file.getPath() + ")";
     }
 
     /**
@@ -1133,7 +1146,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
               maxSize = Math.max(maxSize, val.get());
             }
           } else {
-            throw new IOException("File " + fileWithId.getFileStatus().getPath().toString() +
+            throw new IOException("File " + file.getPath().toString() +
                     " should have had overlap on block starting at " + block.getOffset());
           }
         }
@@ -1161,7 +1174,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       final double splitRatio = (double) length / (double) fileLen;
       final long scaledProjSize = projColsUncompressedSize > 0 ?
           (long) (splitRatio * projColsUncompressedSize) : fileLen;
-      return new OrcSplit(file.getPath(), fileWithId.getFileId(), offset, length, hosts,
+      Object fileKey = fsFileId;
+      if (fileKey == null && allowSyntheticFileIds) {
+        fileKey = new SyntheticFileId(file);
+      }
+      return new OrcSplit(file.getPath(), fileKey, offset, length, hosts,
           fileMetaInfo, isOriginal, hasBase, deltas, scaledProjSize);
     }
 
@@ -1274,7 +1291,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
               && fileInfo.writerVersion != null;
           // We assume that if we needed to create a reader, we need to cache it to meta cache.
           // TODO: This will also needlessly overwrite it in local cache for now.
-          context.footerCache.put(fileWithId.getFileId(), file, fileInfo.fileMetaInfo, orcReader);
+          context.footerCache.put(fsFileId, file, fileInfo.fileMetaInfo, orcReader);
         }
       } else {
         Reader orcReader = createOrcReader();
@@ -1286,8 +1303,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         fileMetaInfo = context.footerInSplits ?
             ((ReaderImpl) orcReader).getFileMetaInfo() : null;
         if (context.cacheStripeDetails) {
-          Long fileId = fileWithId.getFileId();
-          context.footerCache.put(fileId, file, fileMetaInfo, orcReader);
+          context.footerCache.put(fsFileId, file, fileMetaInfo, orcReader);
         }
       }
       includedCols = genIncludedColumns(types, context.conf, isOriginal);
@@ -1338,6 +1354,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       LOG.info("ORC pushdown predicate: " + context.sarg);
     }
     boolean useFileIds = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS);
+    boolean allowSyntheticFileIds = useFileIds && HiveConf.getBoolVar(
+        conf, ConfVars.HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS);
     List<OrcSplit> splits = Lists.newArrayList();
     List<Future<AcidDirInfo>> pathFutures = Lists.newArrayList();
     List<Future<Void>> strategyFutures = Lists.newArrayList();
@@ -1380,8 +1398,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
 
         // We have received a new directory information, make a split strategy.
         --resultsLeft;
-        SplitStrategy<?> splitStrategy = determineSplitStrategy(combinedCtx, context,
-            adi.fs, adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, ugi);
+        SplitStrategy<?> splitStrategy = determineSplitStrategy(combinedCtx, context, adi.fs,
+            adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, ugi, allowSyntheticFileIds);
         if (splitStrategy == null) continue; // Combined.
 
         if (isDebugEnabled) {
@@ -1451,12 +1469,13 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   private static SplitStrategy<?> combineOrCreateETLStrategy(CombinedCtx combinedCtx,
       Context context, FileSystem fs, Path dir, List<HdfsFileStatusWithId> files,
       List<DeltaMetaData> deltas, boolean[] covered, boolean isOriginal,
-      UserGroupInformation ugi) {
+      UserGroupInformation ugi, boolean allowSyntheticFileIds) {
     if (!deltas.isEmpty() || combinedCtx == null) {
-      return new ETLSplitStrategy(context, fs, dir, files, isOriginal, deltas, covered, ugi);
+      return new ETLSplitStrategy(
+          context, fs, dir, files, isOriginal, deltas, covered, ugi, allowSyntheticFileIds);
     } else if (combinedCtx.combined == null) {
       combinedCtx.combined = new ETLSplitStrategy(
-          context, fs, dir, files, isOriginal, deltas, covered, ugi);
+          context, fs, dir, files, isOriginal, deltas, covered, ugi, allowSyntheticFileIds);
       combinedCtx.combineStartUs = System.nanoTime();
       return null;
     } else {
@@ -1465,11 +1484,12 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       switch (r) {
       case YES: return null;
       case NO_AND_CONTINUE:
-        return new ETLSplitStrategy(context, fs, dir, files, isOriginal, deltas, covered, ugi);
+        return new ETLSplitStrategy(
+            context, fs, dir, files, isOriginal, deltas, covered, ugi, allowSyntheticFileIds);
       case NO_AND_SWAP: {
         ETLSplitStrategy oldBase = combinedCtx.combined;
         combinedCtx.combined = new ETLSplitStrategy(
-            context, fs, dir, files, isOriginal, deltas, covered, ugi);
+            context, fs, dir, files, isOriginal, deltas, covered, ugi, allowSyntheticFileIds);
         combinedCtx.combineStartUs = System.nanoTime();
         return oldBase;
       }
@@ -1798,7 +1818,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
   @VisibleForTesting
   static SplitStrategy<?> determineSplitStrategy(CombinedCtx combinedCtx, Context context,
       FileSystem fs, Path dir, AcidUtils.Directory dirInfo,
-      List<HdfsFileStatusWithId> baseOrOriginalFiles, UserGroupInformation ugi) {
+      List<HdfsFileStatusWithId> baseOrOriginalFiles, UserGroupInformation ugi,
+      boolean allowSyntheticFileIds) {
     Path base = dirInfo.getBaseDirectory();
     List<HdfsFileStatusWithId> original = dirInfo.getOriginalFiles();
     List<DeltaMetaData> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
@@ -1826,20 +1847,20 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       switch(context.splitStrategyKind) {
         case BI:
           // BI strategy requested through config
-          return new BISplitStrategy(
-              context, fs, dir, baseOrOriginalFiles, isOriginal, deltas, covered);
+          return new BISplitStrategy(context, fs, dir, baseOrOriginalFiles,
+              isOriginal, deltas, covered, allowSyntheticFileIds);
         case ETL:
           // ETL strategy requested through config
-          return combineOrCreateETLStrategy(combinedCtx, context, fs,
-            dir, baseOrOriginalFiles, deltas, covered, isOriginal, ugi);
+          return combineOrCreateETLStrategy(combinedCtx, context, fs, dir, baseOrOriginalFiles,
+              deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
         default:
           // HYBRID strategy
           if (avgFileSize > context.maxSize || totalFiles <= context.minSplits) {
-            return combineOrCreateETLStrategy(combinedCtx, context, fs,
-                dir, baseOrOriginalFiles, deltas, covered, isOriginal, ugi);
+            return combineOrCreateETLStrategy(combinedCtx, context, fs, dir, baseOrOriginalFiles,
+                deltas, covered, isOriginal, ugi, allowSyntheticFileIds);
           } else {
-            return new BISplitStrategy(
-                context, fs, dir, baseOrOriginalFiles, isOriginal, deltas, covered);
+            return new BISplitStrategy(context, fs, dir, baseOrOriginalFiles,
+                isOriginal, deltas, covered, allowSyntheticFileIds);
           }
       }
     } else {

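For reference, the split generator above now picks a file key in two steps: use the filesystem-provided file ID when present, otherwise fall back to a synthetic ID when HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS permits it. Below is a minimal standalone sketch of that rule, assuming SyntheticFileId derives a stable value from the file's path, length and modification time; the demo class and method names are illustrative, not part of the patch.

    import java.util.Objects;

    final class FileKeySketch {
      // Stand-in for org.apache.hadoop.hive.ql.io.SyntheticFileId: a stable
      // value derived from attributes that identify the file well enough.
      static Object chooseFileKey(Long fsFileId, boolean allowSyntheticFileIds,
                                  String path, long len, long mtime) {
        if (fsFileId != null) {
          return fsFileId;                      // real HDFS inode-based ID wins
        }
        if (!allowSyntheticFileIds) {
          return null;                          // split carries no file key at all
        }
        return Objects.hash(path, len, mtime);  // synthetic fallback
      }
    }
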
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
index 4a27ee7..407fd62 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
@@ -26,13 +26,13 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.orc.FileMetaInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.io.ColumnarSplit;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.LlapAwareSplit;
+import org.apache.hadoop.hive.ql.io.SyntheticFileId;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.mapred.FileSplit;
 
@@ -48,11 +48,11 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
   private boolean isOriginal;
   private boolean hasBase;
   private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
-  private OrcFile.WriterVersion writerVersion;
   private long projColsUncompressedSize;
-  private transient Long fileId;
+  private transient Object fileKey;
 
-  static final int HAS_FILEID_FLAG = 8;
+  static final int HAS_SYNTHETIC_FILEID_FLAG = 16;
+  static final int HAS_LONG_FILEID_FLAG = 8;
   static final int BASE_FLAG = 4;
   static final int ORIGINAL_FLAG = 2;
   static final int FOOTER_FLAG = 1;
@@ -64,13 +64,13 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
     super(null, 0, 0, (String[]) null);
   }
 
-  public OrcSplit(Path path, Long fileId, long offset, long length, String[] hosts,
+  public OrcSplit(Path path, Object fileId, long offset, long length, String[] hosts,
       FileMetaInfo fileMetaInfo, boolean isOriginal, boolean hasBase,
       List<AcidInputFormat.DeltaMetaData> deltas, long projectedDataSize) {
     super(path, offset, length, hosts);
-    // We could avoid serializing file ID and just replace the path with inode-based path.
-    // However, that breaks bunch of stuff because Hive later looks up things by split path.
-    this.fileId = fileId;
+    // For HDFS, we could avoid serializing the file ID and just replace the path with an
+    // inode-based path. However, that breaks a bunch of things, because Hive later looks things up by split path.
+    this.fileKey = fileId;
     this.fileMetaInfo = fileMetaInfo;
     hasFooter = this.fileMetaInfo != null;
     this.isOriginal = isOriginal;
@@ -84,10 +84,12 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
     //serialize path, offset, length using FileSplit
     super.write(out);
 
+    boolean isFileIdLong = fileKey instanceof Long, isFileIdWritable = fileKey instanceof Writable;
     int flags = (hasBase ? BASE_FLAG : 0) |
         (isOriginal ? ORIGINAL_FLAG : 0) |
         (hasFooter ? FOOTER_FLAG : 0) |
-        (fileId != null ? HAS_FILEID_FLAG : 0);
+        (isFileIdLong ? HAS_LONG_FILEID_FLAG : 0) |
+        (isFileIdWritable ? HAS_SYNTHETIC_FILEID_FLAG : 0);
     out.writeByte(flags);
     out.writeInt(deltas.size());
     for(AcidInputFormat.DeltaMetaData delta: deltas) {
@@ -109,8 +111,10 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
           footerBuff.limit() - footerBuff.position());
       WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId());
     }
-    if (fileId != null) {
-      out.writeLong(fileId.longValue());
+    if (isFileIdLong) {
+      out.writeLong(((Long)fileKey).longValue());
+    } else if (isFileIdWritable) {
+      ((Writable)fileKey).write(out);
     }
   }
 
@@ -123,7 +127,11 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
     hasFooter = (FOOTER_FLAG & flags) != 0;
     isOriginal = (ORIGINAL_FLAG & flags) != 0;
     hasBase = (BASE_FLAG & flags) != 0;
-    boolean hasFileId = (HAS_FILEID_FLAG & flags) != 0;
+    boolean hasLongFileId = (HAS_LONG_FILEID_FLAG & flags) != 0,
+        hasWritableFileId = (HAS_SYNTHETIC_FILEID_FLAG & flags) != 0;
+    if (hasLongFileId && hasWritableFileId) {
+      throw new IOException("Invalid split - both file ID types present");
+    }
 
     deltas.clear();
     int numDeltas = in.readInt();
@@ -148,8 +156,12 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
       fileMetaInfo = new FileMetaInfo(compressionType, bufferSize,
           metadataSize, footerBuff, writerVersion);
     }
-    if (hasFileId) {
-      fileId = in.readLong();
+    if (hasLongFileId) {
+      fileKey = in.readLong();
+    } else if (hasWritableFileId) {
+      SyntheticFileId fileId = new SyntheticFileId();
+      fileId.readFields(in);
+      this.fileKey = fileId;
     }
   }
 
@@ -186,8 +198,8 @@ public class OrcSplit extends FileSplit implements ColumnarSplit, LlapAwareSplit
     return projColsUncompressedSize;
   }
 
-  public Long getFileId() {
-    return fileId;
+  public Object getFileKey() {
+    return fileKey;
   }
 
   @Override

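Since the split now carries either a Long file ID or a Writable SyntheticFileId, the flag byte encodes which of the two follows on the wire. A self-contained sketch of the round-trip, with the flag constants copied from the diff and the demo harness purely illustrative:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    final class FlagByteSketch {
      static final int HAS_SYNTHETIC_FILEID_FLAG = 16;
      static final int HAS_LONG_FILEID_FLAG = 8;

      public static void main(String[] args) throws IOException {
        Object fileKey = 42L;  // could also be a Writable SyntheticFileId
        boolean isFileIdLong = fileKey instanceof Long;
        boolean isFileIdWritable = !isFileIdLong && fileKey != null;
        int flags = (isFileIdLong ? HAS_LONG_FILEID_FLAG : 0)
            | (isFileIdWritable ? HAS_SYNTHETIC_FILEID_FLAG : 0);

        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeByte(flags);

        int read = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())).readByte();
        // readFields rejects splits claiming both representations at once.
        if ((read & HAS_LONG_FILEID_FLAG) != 0 && (read & HAS_SYNTHETIC_FILEID_FLAG) != 0) {
          throw new IOException("Invalid split - both file ID types present");
        }
        System.out.println("long=" + ((read & HAS_LONG_FILEID_FLAG) != 0)
            + " synthetic=" + ((read & HAS_SYNTHETIC_FILEID_FLAG) != 0));
      }
    }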

[25/51] [abbrv] hive git commit: HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)

Posted by jd...@apache.org.
HIVE-10632 : Make sure TXN_COMPONENTS gets cleaned up if table is dropped before compaction (Wei Zheng, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/456a91ec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/456a91ec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/456a91ec

Branch: refs/heads/llap
Commit: 456a91ecde6a449177a76fb34ad9b5f13983821b
Parents: ff55d0a
Author: Wei Zheng <we...@apache.org>
Authored: Thu Mar 10 14:37:35 2016 -0800
Committer: Wei Zheng <we...@apache.org>
Committed: Thu Mar 10 14:37:35 2016 -0800

----------------------------------------------------------------------
 .../hive/metastore/AcidEventListener.java       |  94 +++++++++
 .../hadoop/hive/metastore/HiveMetaStore.java    |   1 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |  20 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   | 167 ++++++++++++++-
 .../hadoop/hive/metastore/txn/TxnStore.java     |  37 ++--
 .../hadoop/hive/metastore/txn/TxnUtils.java     |  18 ++
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  27 +--
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |  22 +-
 .../hive/ql/lockmgr/TestDbTxnManager2.java      | 209 ++++++++++++++++++-
 9 files changed, 518 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
new file mode 100644
index 0000000..71ad916
--- /dev/null
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
@@ -0,0 +1,94 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+
+
+/**
+ * Handles cleanup of dropped partitions/tables/databases in ACID-related metastore tables.
+ */
+public class AcidEventListener extends MetaStoreEventListener {
+
+  private TxnStore txnHandler;
+  private HiveConf hiveConf;
+
+  public AcidEventListener(Configuration configuration) {
+    super(configuration);
+    hiveConf = (HiveConf) configuration;
+  }
+
+  @Override
+  public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
+    // We could loop through all the tables to check whether they are ACID first and then perform
+    // cleanup, but it's more efficient to unconditionally perform cleanup for the whole database,
+    // especially when there are a lot of tables.
+    txnHandler = getTxnHandler();
+    txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), null, null);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent)  throws MetaException {
+    if (TxnUtils.isAcidTable(tableEvent.getTable())) {
+      txnHandler = getTxnHandler();
+      txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null);
+    }
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent)  throws MetaException {
+    if (TxnUtils.isAcidTable(partitionEvent.getTable())) {
+      txnHandler = getTxnHandler();
+      txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(),
+          partitionEvent.getPartitionIterator());
+    }
+  }
+
+  private TxnStore getTxnHandler() {
+    boolean hackOn = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEST) ||
+        HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEZ_TEST);
+    String origTxnMgr = null;
+    boolean origConcurrency = false;
+
+    // Since TxnUtils.getTxnStore calls TxnHandler.setConf -> checkQFileTestHack -> TxnDbUtil.setConfValues,
+    // which may change the values of the two entries below, we need to avoid polluting the original values
+    if (hackOn) {
+      origTxnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER);
+      origConcurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
+    }
+
+    txnHandler = TxnUtils.getTxnStore(hiveConf);
+
+    // Set them back
+    if (hackOn) {
+      hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, origTxnMgr);
+      hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, origConcurrency);
+    }
+
+    return txnHandler;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0e8a157..f0bc560 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -443,6 +443,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
           hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
       listeners.add(new SessionPropertiesListener(hiveConf));
+      listeners.add(new AcidEventListener(hiveConf));
 
       if (metrics != null) {
         listeners.add(new HMSMetricsListener(hiveConf, metrics));

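Note that AcidEventListener is wired in unconditionally in code here, rather than through configuration; a user-supplied listener would instead be registered via hive.metastore.event.listeners, roughly as sketched below (com.example.MyListener is a hypothetical class name, not part of the patch):

    import org.apache.hadoop.hive.conf.HiveConf;

    final class ListenerConfigSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Comma-separated class names read by MetaStoreUtils.getMetaStoreListeners
        // in the hunk above; the class named here is hypothetical.
        conf.setVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS,
            "com.example.MyListener");
      }
    }
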
http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 2a7545c..5d10b5c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
- * Utility methods for creating and destroying txn database/schema.
+ * Utility methods for creating and destroying txn database/schema, plus methods for
+ * running queries against metastore tables.
  * Placed here in a separate class so it can be shared across unit tests.
  */
 public final class TxnDbUtil {
@@ -142,8 +143,13 @@ public final class TxnDbUtil {
 
       conn.commit();
     } catch (SQLException e) {
+      try {
+        conn.rollback();
+      } catch (SQLException re) {
+        System.err.println("Error rolling back: " + re.getMessage());
+      }
+
       // This might be a deadlock, if so, let's retry
-      conn.rollback();
       if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
         LOG.warn("Caught deadlock, retrying db creation");
         prepDb();
@@ -219,14 +225,20 @@ public final class TxnDbUtil {
     }
   }
 
-  public static int findNumCurrentLocks() throws Exception {
+  /**
+   * Utility method for running COUNT queries like "select count(*) from ..." against metastore tables.
+   * @param countQuery the COUNT query text
+   * @return the count returned by the query
+   * @throws Exception
+   */
+  public static int countQueryAgent(String countQuery) throws Exception {
     Connection conn = null;
     Statement stmt = null;
     ResultSet rs = null;
     try {
       conn = getConnection();
       stmt = conn.createStatement();
-      rs = stmt.executeQuery("select count(*) from hive_locks");
+      rs = stmt.executeQuery(countQuery);
       if (!rs.next()) {
         return 0;
       }

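The renamed helper generalizes the old findNumCurrentLocks to arbitrary COUNT queries. A short usage sketch in the style of the tests later in this patch (the demo class is illustrative):

    import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
    import org.junit.Assert;

    final class CountQuerySketch {
      static void assertNoOpenLocks() throws Exception {
        // Same query the removed findNumCurrentLocks() used to run.
        int count = TxnDbUtil.countQueryAgent("select count(*) from hive_locks");
        Assert.assertEquals(0, count);
      }
    }
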
http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index d4d0162..53d2bb4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -25,6 +25,7 @@ import org.apache.commons.dbcp.DriverManagerConnectionFactory;
 import org.apache.commons.dbcp.PoolableConnectionFactory;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.Warehouse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.commons.dbcp.PoolingDataSource;
@@ -1153,6 +1154,170 @@ abstract class TxnHandler implements TxnStore {
   }
 
   /**
+   * Clean up corresponding records in metastore tables, specifically:
+   * TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS, COMPACTION_QUEUE, COMPLETED_COMPACTIONS
+   */
+  public void cleanupRecords(HiveObjectType type, Database db, Table table,
+                             Iterator<Partition> partitionIterator) throws MetaException {
+    try {
+      Connection dbConn = null;
+      Statement stmt = null;
+
+      try {
+        String dbName;
+        String tblName;
+        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+        stmt = dbConn.createStatement();
+        List<String> queries = new ArrayList<String>();
+        StringBuilder buff = new StringBuilder();
+
+        switch (type) {
+          case DATABASE:
+            dbName = db.getName();
+
+            buff.append("delete from TXN_COMPONENTS where tc_database='");
+            buff.append(dbName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
+            buff.append(dbName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPACTION_QUEUE where cq_database='");
+            buff.append(dbName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
+            buff.append(dbName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            break;
+          case TABLE:
+            dbName = table.getDbName();
+            tblName = table.getTableName();
+
+            buff.append("delete from TXN_COMPONENTS where tc_database='");
+            buff.append(dbName);
+            buff.append("' and tc_table='");
+            buff.append(tblName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
+            buff.append(dbName);
+            buff.append("' and ctc_table='");
+            buff.append(tblName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPACTION_QUEUE where cq_database='");
+            buff.append(dbName);
+            buff.append("' and cq_table='");
+            buff.append(tblName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            buff.setLength(0);
+            buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
+            buff.append(dbName);
+            buff.append("' and cc_table='");
+            buff.append(tblName);
+            buff.append("'");
+            queries.add(buff.toString());
+
+            break;
+          case PARTITION:
+            dbName = table.getDbName();
+            tblName = table.getTableName();
+            List<FieldSchema> partCols = table.getPartitionKeys();  // partition columns
+            List<String> partVals;                                  // partition values
+            String partName;
+
+            while (partitionIterator.hasNext()) {
+              Partition p = partitionIterator.next();
+              partVals = p.getValues();
+              partName = Warehouse.makePartName(partCols, partVals);
+
+              buff.append("delete from TXN_COMPONENTS where tc_database='");
+              buff.append(dbName);
+              buff.append("' and tc_table='");
+              buff.append(tblName);
+              buff.append("' and tc_partition='");
+              buff.append(partName);
+              buff.append("'");
+              queries.add(buff.toString());
+
+              buff.setLength(0);
+              buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
+              buff.append(dbName);
+              buff.append("' and ctc_table='");
+              buff.append(tblName);
+              buff.append("' and ctc_partition='");
+              buff.append(partName);
+              buff.append("'");
+              queries.add(buff.toString());
+
+              buff.setLength(0);
+              buff.append("delete from COMPACTION_QUEUE where cq_database='");
+              buff.append(dbName);
+              buff.append("' and cq_table='");
+              buff.append(tblName);
+              buff.append("' and cq_partition='");
+              buff.append(partName);
+              buff.append("'");
+              queries.add(buff.toString());
+
+              buff.setLength(0);
+              buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
+              buff.append(dbName);
+              buff.append("' and cc_table='");
+              buff.append(tblName);
+              buff.append("' and cc_partition='");
+              buff.append(partName);
+              buff.append("'");
+              queries.add(buff.toString());
+            }
+
+            break;
+          default:
+            throw new MetaException("Invalid object type for cleanup: " + type);
+        }
+
+        for (String query : queries) {
+          LOG.debug("Going to execute update <" + query + ">");
+          stmt.executeUpdate(query);
+        }
+
+        LOG.debug("Going to commit");
+        dbConn.commit();
+      } catch (SQLException e) {
+        LOG.debug("Going to rollback");
+        rollbackDBConn(dbConn);
+        checkRetryable(dbConn, e, "cleanupRecords");
+        if (e.getMessage().contains("does not exist")) {
+          LOG.warn("Cannot perform cleanup since metastore table does not exist");
+        } else {
+          throw new MetaException("Unable to clean up " + StringUtils.stringifyException(e));
+        }
+      } finally {
+        closeStmt(stmt);
+        closeDbConn(dbConn);
+      }
+    } catch (RetryException e) {
+      cleanupRecords(type, db, table, partitionIterator);
+    }
+  }
+
+  /**
    * For testing only, do not use.
    */
   @VisibleForTesting
@@ -1599,7 +1764,7 @@ abstract class TxnHandler implements TxnStore {
         TxnDbUtil.prepDb();
       } catch (Exception e) {
         // We may have already created the tables and thus don't need to redo it.
-        if (!e.getMessage().contains("already exists")) {
+        if (e.getMessage() != null && !e.getMessage().contains("already exists")) {
           throw new RuntimeException("Unable to set up transaction database for" +
             " testing: " + e.getMessage(), e);
         }

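For the TABLE case above, cleanupRecords boils down to four DELETEs keyed on database and table name. The patch builds them by string concatenation; the parameterized form below is only a readability sketch of the same statements, not the patch's code:

    import java.sql.Connection;
    import java.sql.PreparedStatement;

    final class CleanupSketch {
      static void cleanupDroppedTable(Connection conn, String db, String tbl)
          throws Exception {
        String[] deletes = {
          "delete from TXN_COMPONENTS where tc_database=? and tc_table=?",
          "delete from COMPLETED_TXN_COMPONENTS where ctc_database=? and ctc_table=?",
          "delete from COMPACTION_QUEUE where cq_database=? and cq_table=?",
          "delete from COMPLETED_COMPACTIONS where cc_database=? and cc_table=?"
        };
        for (String sql : deletes) {
          try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, db);
            ps.setString(2, tbl);
            ps.executeUpdate();
          }
        }
        conn.commit();
      }
    }
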
http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 5e0306a..6d738b5 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -21,32 +21,10 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
-import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
-import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
-import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
-import org.apache.hadoop.hive.metastore.api.CompactionRequest;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
-import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockRequest;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
-import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
-import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
-import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
-import org.apache.hadoop.hive.metastore.api.TxnOpenException;
-import org.apache.hadoop.hive.metastore.api.UnlockRequest;
+import org.apache.hadoop.hive.metastore.api.*;
 
 import java.sql.SQLException;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 
@@ -216,6 +194,17 @@ public interface TxnStore {
       throws NoSuchTxnException,  TxnAbortedException, MetaException;
 
   /**
+   * Clean up corresponding records in metastore tables.
+   * @param type Hive object type
+   * @param db database object
+   * @param table table object
+   * @param partitionIterator partition iterator
+   * @throws MetaException
+   */
+  public void cleanupRecords(HiveObjectType type, Database db, Table table,
+                             Iterator<Partition> partitionIterator) throws MetaException;
+
+  /**
    * Timeout transactions and/or locks.  This should only be called by the compactor.
    */
   public void performTimeOuts();

http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index b7502c2..0d90b11 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@ -23,11 +23,14 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
 import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Map;
 import java.util.Set;
 
 public class TxnUtils {
@@ -94,4 +97,19 @@ public class TxnUtils {
       throw new RuntimeException(e);
     }
   }
+
+  /** Checks if a table is a valid ACID table.
+   * Note: users are responsible for using the correct TxnManager. We do not look at
+   * SessionState.get().getTxnMgr().supportsAcid() here.
+   * @param table the table to check
+   * @return true if the table is a valid ACID table, false otherwise
+   */
+  public static boolean isAcidTable(Table table) {
+    if (table == null) {
+      return false;
+    }
+    Map<String, String> parameters = table.getParameters();
+    String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+  }
 }

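The new metastore-side check looks only at table properties, unlike the old ql-side check (relaxed in the AcidUtils hunk below) that also required buckets and ACID I/O formats. A small sketch of its behavior; the demo class is illustrative:

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.txn.TxnUtils;

    final class AcidCheckSketch {
      public static void main(String[] args) {
        Table t = new Table();
        // Value comparison is case-insensitive: 'true', 'TRUE', 'True' all pass.
        t.setParameters(Collections.singletonMap("transactional", "TRUE"));
        System.out.println(TxnUtils.isAcidTable(t));   // true
        t.setParameters(Collections.<String, String>emptyMap());
        System.out.println(TxnUtils.isAcidTable(t));   // false
      }
    }
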
http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 9bf9377..2b50a2a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hive.ql.io;
 
-import org.apache.hadoop.mapred.InputFormat;
-import org.apache.hadoop.mapred.OutputFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -706,12 +704,7 @@ public class AcidUtils {
     HiveConf.setBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, isAcidTable);
   }
 
-  /** Checks metadata to make sure it's a valid ACID table at metadata level
-   * Three things we will check:
-   * 1. TBLPROPERTIES 'transactional'='true'
-   * 2. The table should be bucketed
-   * 3. InputFormatClass/OutputFormatClass should implement AcidInputFormat/AcidOutputFormat
-   *    Currently OrcInputFormat/OrcOutputFormat is the only implementer
+  /** Checks if a table is a valid ACID table.
    * Note, users are responsible for using the correct TxnManager. We do not look at
    * SessionState.get().getTxnMgr().supportsAcid() here
    * @param table table
@@ -725,23 +718,7 @@ public class AcidUtils {
     if (tableIsTransactional == null) {
       tableIsTransactional = table.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.toUpperCase());
     }
-    if (tableIsTransactional == null || !tableIsTransactional.equalsIgnoreCase("true")) {
-      return false;
-    }
-
-    List<String> bucketCols = table.getBucketCols();
-    if (bucketCols == null || bucketCols.isEmpty()) {
-      return false;
-    }
-
-    Class<? extends InputFormat> inputFormatClass = table.getInputFormatClass();
-    Class<? extends OutputFormat> outputFormatClass = table.getOutputFormatClass();
-    if (inputFormatClass == null || outputFormatClass == null ||
-        !AcidInputFormat.class.isAssignableFrom(inputFormatClass) ||
-        !AcidOutputFormat.class.isAssignableFrom(outputFormatClass)) {
-      return false;
-    }
 
-    return true;
+    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index f4debfe..9b00435 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -331,14 +331,7 @@ public class TestTxnCommands2 {
 
     // 4. Perform a major compaction
     runStatementOnDriver("alter table "+ Table.NONACIDORCTBL + " compact 'MAJOR'");
-    Worker w = new Worker();
-    w.setThreadId((int) w.getId());
-    w.setHiveConf(hiveConf);
-    AtomicBoolean stop = new AtomicBoolean();
-    AtomicBoolean looped = new AtomicBoolean();
-    stop.set(true);
-    w.init(stop, looped);
-    w.run();
+    runWorker(hiveConf);
     // There should be 1 new directory: base_xxxxxxx.
     // Original bucket files and delta directory should stay until Cleaner kicks in.
     status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
@@ -375,14 +368,7 @@ public class TestTxnCommands2 {
     // Before Cleaner, there should be 5 items:
     // 2 original files, 1 original directory, 1 base directory and 1 delta directory
     Assert.assertEquals(5, status.length);
-    Cleaner c = new Cleaner();
-    c.setThreadId((int) c.getId());
-    c.setHiveConf(hiveConf);
-    stop = new AtomicBoolean();
-    looped = new AtomicBoolean();
-    stop.set(true);
-    c.init(stop, looped);
-    c.run();
+    runCleaner(hiveConf);
     // There should be only 1 directory left: base_xxxxxxx.
     // Original bucket files and delta directory should have been cleaned up.
     status = fs.listStatus(new Path(TEST_WAREHOUSE_DIR + "/" +
@@ -596,7 +582,7 @@ public class TestTxnCommands2 {
     }
     return compactionsByState;
   }
-  private static void runWorker(HiveConf hiveConf) throws MetaException {
+  public static void runWorker(HiveConf hiveConf) throws MetaException {
     AtomicBoolean stop = new AtomicBoolean(true);
     Worker t = new Worker();
     t.setThreadId((int) t.getId());
@@ -605,7 +591,7 @@ public class TestTxnCommands2 {
     t.init(stop, looped);
     t.run();
   }
-  private static void runCleaner(HiveConf hiveConf) throws MetaException {
+  public static void runCleaner(HiveConf hiveConf) throws MetaException {
     AtomicBoolean stop = new AtomicBoolean(true);
     Cleaner t = new Cleaner();
     t.setThreadId((int) t.getId());

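The refactor above lifts the boilerplate for one-shot compactor runs into shared helpers. A sketch of the pattern, assuming the compactor Worker class lives in org.apache.hadoop.hive.ql.txn.compactor as elsewhere in Hive:

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.txn.compactor.Worker;

    final class CompactorRunSketch {
      static void runWorkerOnce(HiveConf conf) throws Exception {
        AtomicBoolean stop = new AtomicBoolean(true);  // stop==true: single pass
        Worker w = new Worker();
        w.setThreadId((int) w.getId());
        w.setHiveConf(conf);
        w.init(stop, new AtomicBoolean());             // second flag: "looped" probe
        w.run();                                       // synchronous; returns after one cycle
      }
    }
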
http://git-wip-us.apache.org/repos/asf/hive/blob/456a91ec/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 1b07d4b..d1b370e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -248,34 +248,233 @@ public class TestDbTxnManager2 {
     cpr = driver.run("create table T11 (a int, b int) clustered by(b) into 2 buckets stored as orc");
     checkCmdOnDriver(cpr);
 
-    // Now switch to DummyTxnManager
-    conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
-    txnMgr = SessionState.get().initTxnMgr(conf);
-    Assert.assertTrue(txnMgr instanceof DummyTxnManager);
-
     // All DML should fail with DummyTxnManager on ACID table
+    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("select * from T10");
     Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("This command is not allowed on an ACID table"));
 
+    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("insert into table T10 values (1, 2)");
     Assert.assertEquals(ErrorMsg.TXNMGR_NOT_ACID.getErrorCode(), cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("This command is not allowed on an ACID table"));
 
+    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("insert overwrite table T10 select a, b from T11");
     Assert.assertEquals(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getErrorCode(), cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("INSERT OVERWRITE not allowed on table with OutputFormat" +
         " that implements AcidOutputFormat while transaction manager that supports ACID is in use"));
 
+    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("update T10 set a=0 where b=1");
     Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("Attempt to do update or delete using transaction manager that does not support these operations."));
 
+    useDummyTxnManagerTemporarily(conf);
     cpr = driver.compileAndRespond("delete from T10");
     Assert.assertEquals(ErrorMsg.ACID_OP_ON_NONACID_TXNMGR.getErrorCode(), cpr.getResponseCode());
     Assert.assertTrue(cpr.getErrorMessage().contains("Attempt to do update or delete using transaction manager that does not support these operations."));
   }
 
+  /**
+   * Temporarily set DummyTxnManager as the txn manager for the session.
+   * HIVE-10632: we have to do this for every new query, because this jira introduced an AcidEventListener
+   * in HiveMetaStore, which will instantiate a txn handler, but due to HIVE-12902, we have to call
+   * TxnHandler.setConf and TxnHandler.checkQFileTestHack and TxnDbUtil.setConfValues, which will
+   * set txn manager back to DbTxnManager.
+   */
+  private void useDummyTxnManagerTemporarily(HiveConf hiveConf) throws Exception {
+    hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
+    txnMgr = SessionState.get().initTxnMgr(hiveConf);
+    Assert.assertTrue(txnMgr instanceof DummyTxnManager);
+  }
+
+  /**
+   * Normally the compaction process will clean up records in TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS,
+   * COMPACTION_QUEUE and COMPLETED_COMPACTIONS. But if a table/partition has been dropped before
+   * compaction and there are still relevant records in those metastore tables, the Initiator will
+   * complain about not being able to find the table/partition. This test makes sure that the
+   * relevant records are cleaned up as soon as a table/partition is dropped.
+   *
+   * Note: we don't need to worry about cleaning up the TXNS table here, since it's handled separately.
+   * @throws Exception
+   */
+  @Test
+  public void testMetastoreTablesCleanup() throws Exception {
+    CommandProcessorResponse cpr = driver.run("create database if not exists temp");
+    checkCmdOnDriver(cpr);
+
+    // Create some ACID tables: T10, T11 - unpartitioned table, T12p, T13p - partitioned table
+    cpr = driver.run("create table temp.T10 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table temp.T11 (a int, b int) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table temp.T12p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("create table temp.T13p (a int, b int) partitioned by (ds string, hour string) clustered by(b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
+    checkCmdOnDriver(cpr);
+
+    // Successfully insert some data into ACID tables, so that we have records in COMPLETED_TXN_COMPONENTS
+    cpr = driver.run("insert into temp.T10 values (1, 1)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T10 values (2, 2)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T11 values (3, 3)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T11 values (4, 4)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T12p partition (ds='today', hour='1') values (5, 5)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (6, 6)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (7, 7)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T13p partition (ds='tomorrow', hour='2') values (8, 8)");
+    checkCmdOnDriver(cpr);
+    int count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11')");
+    Assert.assertEquals(4, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t12p', 't13p')");
+    Assert.assertEquals(4, count);
+
+    // Fail some inserts, so that we have records in TXN_COMPONENTS
+    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true);
+    cpr = driver.run("insert into temp.T10 values (9, 9)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T11 values (10, 10)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)");
+    checkCmdOnDriver(cpr);
+    cpr = driver.run("insert into temp.T13p partition (ds='today', hour='1') values (12, 12)");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(4, count);
+    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false);
+
+    // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
+    Assert.assertEquals(1, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
+    Assert.assertEquals(2, count);
+    cpr = driver.run("drop table temp.T10");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t10'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t10'");
+    Assert.assertEquals(0, count);
+
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
+    Assert.assertEquals(1, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
+    Assert.assertEquals(1, count);
+    cpr = driver.run("alter table temp.T12p drop partition (ds='today', hour='1')");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE='t12p' and TC_PARTITION='ds=today/hour=1'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE='t12p' and CTC_PARTITION='ds=today/hour=1'");
+    Assert.assertEquals(0, count);
+
+    // Successfully perform compaction on a table/partition, so that we have successful records in COMPLETED_COMPACTIONS
+    cpr = driver.run("alter table temp.T11 compact 'minor'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='i'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='r' and CQ_TYPE='i'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='s' and CC_TYPE='i'");
+    Assert.assertEquals(1, count);
+
+    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'minor'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='i'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='r' and CQ_TYPE='i'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runCleaner(conf);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='s' and CC_TYPE='i'");
+    Assert.assertEquals(1, count);
+
+    // Fail compaction, so that we have failed records in COMPLETED_COMPACTIONS
+    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true);
+    cpr = driver.run("alter table temp.T11 compact 'major'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf); // will fail
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11' and CC_STATE='f' and CC_TYPE='a'");
+    Assert.assertEquals(1, count);
+
+    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(1, count);
+    org.apache.hadoop.hive.ql.TestTxnCommands2.runWorker(conf); // will fail
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p' and CC_STATE='f' and CC_TYPE='a'");
+    Assert.assertEquals(1, count);
+    conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false);
+
+    // Put 2 records into COMPACTION_QUEUE and do nothing
+    cpr = driver.run("alter table temp.T11 compact 'major'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(1, count);
+    cpr = driver.run("alter table temp.T12p partition (ds='tomorrow', hour='2') compact 'major'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p' and CQ_PARTITION='ds=tomorrow/hour=2' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(1, count);
+
+    // Drop a table/partition, corresponding records in COMPACTION_QUEUE and COMPLETED_COMPACTIONS should disappear
+    cpr = driver.run("drop table temp.T11");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t11'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t11'");
+    Assert.assertEquals(0, count);
+
+    cpr = driver.run("alter table temp.T12p drop partition (ds='tomorrow', hour='2')");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t12p'");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE='t12p'");
+    Assert.assertEquals(0, count);
+
+    // Put 1 record into COMPACTION_QUEUE and do nothing
+    cpr = driver.run("alter table temp.T13p partition (ds='today', hour='1') compact 'major'");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE='t13p' and CQ_STATE='i' and CQ_TYPE='a'");
+    Assert.assertEquals(1, count);
+
+    // Drop database, everything in all 4 meta tables should disappear
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(1, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(2, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(1, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(0, count);
+    cpr = driver.run("drop database if exists temp cascade");
+    checkCmdOnDriver(cpr);
+    count = TxnDbUtil.countQueryAgent("select count(*) from TXN_COMPONENTS where TC_DATABASE='temp' and TC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_TXN_COMPONENTS where CTC_DATABASE='temp' and CTC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPACTION_QUEUE where CQ_DATABASE='temp' and CQ_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(0, count);
+    count = TxnDbUtil.countQueryAgent("select count(*) from COMPLETED_COMPACTIONS where CC_DATABASE='temp' and CC_TABLE in ('t10', 't11', 't12p', 't13p')");
+    Assert.assertEquals(0, count);
+  }
+
   private void checkLock(LockType type, LockState state, String db, String table, String partition, ShowLocksResponseElement l) {
     Assert.assertEquals(l.toString(),l.getType(), type);
     Assert.assertEquals(l.toString(),l.getState(), state);


[20/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fdc9cafe
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fdc9cafe
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fdc9cafe

Branch: refs/heads/llap
Commit: fdc9cafe1f0a609efdfe1f98b06a82559e059ddf
Parents: 5bf324e
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Mar 9 08:47:30 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Mar 9 13:31:52 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |   8 ++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  15 ++-
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |  26 ++++
 .../clientnegative/authorization_part.q.out     |   2 +
 .../clientnegative/authorization_view_1.q.out   |   2 +
 .../clientnegative/authorization_view_2.q.out   |   2 +
 .../clientnegative/authorization_view_3.q.out   |   2 +
 .../clientnegative/authorization_view_4.q.out   |   2 +
 .../authorization_view_disable_cbo_1.q.out      |   2 +
 .../authorization_view_disable_cbo_2.q.out      |   2 +
 .../authorization_view_disable_cbo_3.q.out      |   2 +
 .../authorization_view_disable_cbo_4.q.out      |   2 +
 .../clientnegative/updateBasicStats.q.out       |   2 +
 .../clientpositive/allcolref_in_udf.q.out       |   1 +
 .../alter_rename_partition_authorization.q.out  |   2 +
 .../alter_table_update_status.q.out             |   2 +
 .../clientpositive/annotate_stats_table.q.out   |   1 +
 .../clientpositive/authorization_1.q.out        |   2 +
 .../clientpositive/authorization_2.q.out        |   2 +
 .../clientpositive/authorization_3.q.out        |   2 +
 .../clientpositive/authorization_4.q.out        |   2 +
 .../clientpositive/authorization_6.q.out        |   2 +
 .../authorization_create_temp_table.q.out       |   2 +
 .../clientpositive/authorization_view_1.q.out   |   2 +
 .../authorization_view_disable_cbo_1.q.out      |   2 +
 .../clientpositive/autogen_colalias.q.out       |  22 ++++
 .../cbo_SortUnionTransposeRule.q.out            |   2 +
 .../cbo_rp_cross_product_check_2.q.out          |   4 +
 .../clientpositive/cbo_rp_unionDistinct_2.q.out |   6 +
 .../clientpositive/char_nested_types.q.out      |   2 +
 .../clientpositive/colstats_all_nulls.q.out     |   3 +
 .../clientpositive/create_default_prop.q.out    |   1 +
 .../clientpositive/cross_product_check_1.q.out  |   4 +
 .../clientpositive/cross_product_check_2.q.out  |   4 +
 ql/src/test/results/clientpositive/ctas.q.out   |  12 ++
 .../test/results/clientpositive/ctas_char.q.out |   2 +
 .../results/clientpositive/ctas_colname.q.out   |  21 ++++
 .../test/results/clientpositive/ctas_date.q.out |   4 +
 .../ctas_uses_database_location.q.out           |   2 +
 .../results/clientpositive/ctas_varchar.q.out   |   2 +
 ql/src/test/results/clientpositive/cte_2.q.out  |   1 +
 ql/src/test/results/clientpositive/cte_4.q.out  |   1 +
 .../test/results/clientpositive/database.q.out  |   2 +
 .../results/clientpositive/dbtxnmgr_ddl1.q.out  |   2 +
 .../test/results/clientpositive/decimal_6.q.out |   2 +
 .../results/clientpositive/decimal_join2.q.out  |   2 +
 .../results/clientpositive/decimal_serde.q.out  |   6 +
 .../results/clientpositive/empty_join.q.out     |   2 +
 .../results/clientpositive/explain_ddl.q.out    |   2 +
 .../results/clientpositive/global_limit.q.out   |   1 +
 .../clientpositive/groupby_duplicate_key.q.out  |   3 +
 .../test/results/clientpositive/input46.q.out   |   4 +
 .../test/results/clientpositive/insert0.q.out   |   2 +
 ql/src/test/results/clientpositive/join41.q.out |   2 +
 ql/src/test/results/clientpositive/join42.q.out |  10 ++
 .../clientpositive/join_filters_overlap.q.out   |   2 +
 .../clientpositive/lateral_view_outer.q.out     |   2 +
 .../results/clientpositive/llap/cte_2.q.out     |   1 +
 .../results/clientpositive/llap/cte_4.q.out     |   1 +
 .../llap/dynamic_partition_pruning.q.out        |  10 ++
 .../llap/hybridgrace_hashjoin_1.q.out           |   4 +
 .../clientpositive/llap/llap_nullscan.q.out     |   4 +
 .../results/clientpositive/llap/llap_udf.q.out  |   2 +
 .../clientpositive/llap/llapdecider.q.out       |   2 +
 .../results/clientpositive/llap/tez_dml.q.out   |   4 +
 .../llap/tez_join_result_complex.q.out          |  32 +++++
 .../results/clientpositive/llap/tez_union.q.out |  11 ++
 .../vectorized_dynamic_partition_pruning.q.out  |  10 ++
 .../clientpositive/llap_partitioned.q.out       |   4 +
 .../clientpositive/llap_uncompressed.q.out      |  12 ++
 ql/src/test/results/clientpositive/merge3.q.out |   4 +
 .../multi_insert_lateral_view.q.out             |   2 +
 .../clientpositive/multi_insert_union_src.q.out |   2 +
 .../clientpositive/multi_join_union.q.out       |   8 ++
 .../results/clientpositive/nestedvirtual.q.out  |   9 ++
 .../clientpositive/non_ascii_literal2.q.out     |   2 +
 .../results/clientpositive/nullformatCTAS.q.out |   2 +
 .../results/clientpositive/orc_createas1.q.out  |   5 +
 .../test/results/clientpositive/orc_llap.q.out  |  42 +++++++
 .../clientpositive/parallel_orderby.q.out       |   4 +
 .../results/clientpositive/parquet_ctas.q.out   |   9 ++
 .../results/clientpositive/parquet_join.q.out   |   5 +
 .../parquet_map_null.q.java1.7.out              |   1 +
 .../parquet_map_of_arrays_of_ints.q.out         |   1 +
 .../clientpositive/parquet_map_of_maps.q.out    |   1 +
 .../parquet_mixed_partition_formats2.q.out      |   4 +
 .../clientpositive/parquet_nested_complex.q.out |   5 +
 .../parquet_schema_evolution.q.out              |   2 +
 ...arquet_write_correct_definition_levels.q.out |   1 +
 .../clientpositive/partition_decode_name.q.out  |   2 +
 .../clientpositive/partition_special_char.q.out |   2 +
 .../query_result_fileformat.q.out               |   2 +
 .../clientpositive/rcfile_createas1.q.out       |   3 +
 .../clientpositive/rcfile_default_format.q.out  |   8 ++
 .../sample_islocalmode_hook.q.out               |   4 +
 .../clientpositive/select_same_col.q.out        |   2 +
 .../test/results/clientpositive/semijoin.q.out  |   6 +
 .../test/results/clientpositive/semijoin3.q.out |   4 +
 .../clientpositive/skewjoin_noskew.q.out        |   2 +
 .../clientpositive/skewjoin_onesideskew.q.out   |   2 +
 .../results/clientpositive/smb_mapjoin9.q.out   |   4 +
 .../spark/cross_product_check_1.q.out           |   4 +
 .../spark/cross_product_check_2.q.out           |   4 +
 .../results/clientpositive/spark/ctas.q.out     |  12 ++
 .../results/clientpositive/spark/join41.q.out   |   2 +
 .../spark/join_filters_overlap.q.out            |   2 +
 .../spark/multi_insert_lateral_view.q.out       |   2 +
 .../clientpositive/spark/multi_join_union.q.out |   8 ++
 .../clientpositive/spark/parallel_orderby.q.out |   4 +
 .../clientpositive/spark/parquet_join.q.out     |   5 +
 .../results/clientpositive/spark/semijoin.q.out |   6 +
 .../clientpositive/spark/skewjoin_noskew.q.out  |   2 +
 .../results/clientpositive/spark/stats5.q.out   |   2 +
 .../clientpositive/spark/temp_table_join1.q.out |   2 +
 .../results/clientpositive/spark/union24.q.out  |   8 ++
 .../results/clientpositive/spark/union27.q.out  |   6 +
 .../results/clientpositive/spark/union31.q.out  |   8 ++
 .../results/clientpositive/spark/union32.q.out  |   4 +
 .../clientpositive/spark/union_top_level.q.out  |   2 +
 .../spark/vector_between_in.q.out               |   4 +
 .../spark/vector_decimal_aggregate.q.out        |   4 +
 .../spark/vector_outer_join1.q.out              |  60 ++++++++++
 .../spark/vector_outer_join2.q.out              |  60 ++++++++++
 .../spark/vector_outer_join3.q.out              |  60 ++++++++++
 .../spark/vector_outer_join4.q.out              |  60 ++++++++++
 .../spark/vector_outer_join5.q.out              |   8 ++
 .../spark/vectorization_decimal_date.q.out      |   4 +
 .../spark/vectorization_short_regress.q.out     |  12 ++
 .../special_character_in_tabnames_1.q.out       |   2 +
 ql/src/test/results/clientpositive/stats5.q.out |   2 +
 .../clientpositive/str_to_map.q.java1.7.out     |   1 +
 .../results/clientpositive/temp_table.q.out     |   2 +
 .../clientpositive/temp_table_join1.q.out       |   2 +
 .../tez/cross_product_check_1.q.out             |   4 +
 .../tez/cross_product_check_2.q.out             |   4 +
 .../test/results/clientpositive/tez/ctas.q.out  |  12 ++
 .../test/results/clientpositive/tez/cte_2.q.out |   1 +
 .../test/results/clientpositive/tez/cte_4.q.out |   1 +
 .../tez/dynamic_partition_pruning.q.out         |  10 ++
 .../results/clientpositive/tez/empty_join.q.out |   2 +
 .../clientpositive/tez/explainuser_1.q.out      |   4 +
 .../clientpositive/tez/explainuser_3.q.out      |   2 +
 .../tez/hybridgrace_hashjoin_1.q.out            |   4 +
 .../clientpositive/tez/llap_nullscan.q.out      |   4 +
 .../clientpositive/tez/llapdecider.q.out        |   2 +
 .../results/clientpositive/tez/temp_table.q.out |   2 +
 .../results/clientpositive/tez/tez_dml.q.out    |   4 +
 .../tez/tez_join_result_complex.q.out           |  32 +++++
 .../results/clientpositive/tez/tez_union.q.out  |  11 ++
 .../clientpositive/tez/unionDistinct_1.q.out    |  26 ++++
 .../clientpositive/tez/unionDistinct_2.q.out    |   6 +
 .../clientpositive/tez/union_fast_stats.q.out   | 120 +++++++++++++++++++
 .../tez/vector_between_columns.q.out            |   4 +
 .../clientpositive/tez/vector_between_in.q.out  |   4 +
 .../tez/vector_char_mapjoin1.q.out              |   6 +
 .../tez/vector_decimal_10_0.q.out               |   1 +
 .../clientpositive/tez/vector_decimal_3.q.out   |   2 +
 .../clientpositive/tez/vector_decimal_6.q.out   |   2 +
 .../tez/vector_decimal_aggregate.q.out          |   4 +
 .../tez/vector_decimal_expressions.q.out        |   3 +
 .../tez/vector_decimal_math_funcs.q.out         |   4 +
 .../tez/vector_grouping_sets.q.out              |  29 +++++
 .../tez/vector_interval_mapjoin.q.out           |  26 ++++
 .../clientpositive/tez/vector_join30.q.out      |   2 +
 .../tez/vector_join_filters.q.out               |   2 +
 .../clientpositive/tez/vector_join_nulls.q.out  |   2 +
 .../tez/vector_leftsemi_mapjoin.q.out           |   6 +
 .../tez/vector_multi_insert.q.out               |   1 +
 .../tez/vector_nullsafe_join.q.out              |   2 +
 .../clientpositive/tez/vector_outer_join1.q.out |  60 ++++++++++
 .../clientpositive/tez/vector_outer_join2.q.out |  60 ++++++++++
 .../clientpositive/tez/vector_outer_join3.q.out |  60 ++++++++++
 .../clientpositive/tez/vector_outer_join4.q.out |  60 ++++++++++
 .../clientpositive/tez/vector_outer_join5.q.out |   8 ++
 .../clientpositive/tez/vector_outer_join6.q.out |  12 ++
 .../tez/vector_partitioned_date_time.q.out      |   6 +
 .../tez/vector_reduce_groupby_decimal.q.out     |   4 +
 .../tez/vector_varchar_mapjoin1.q.out           |   6 +
 .../tez/vectorization_decimal_date.q.out        |   4 +
 .../tez/vectorization_short_regress.q.out       |  12 ++
 .../vectorized_dynamic_partition_pruning.q.out  |  10 ++
 .../clientpositive/udf_unix_timestamp.q.out     |   2 +
 .../test/results/clientpositive/union24.q.out   |   8 ++
 .../test/results/clientpositive/union27.q.out   |   6 +
 .../test/results/clientpositive/union31.q.out   |   8 ++
 .../test/results/clientpositive/union32.q.out   |   4 +
 .../clientpositive/unionDistinct_1.q.out        |  26 ++++
 .../clientpositive/unionDistinct_2.q.out        |   6 +
 .../clientpositive/union_fast_stats.q.out       | 120 +++++++++++++++++++
 .../clientpositive/union_top_level.q.out        |   2 +
 .../clientpositive/updateAccessTime.q.out       |   2 +
 .../clientpositive/updateBasicStats.q.out       |   2 +
 .../clientpositive/varchar_nested_types.q.out   |   2 +
 .../clientpositive/vector_between_columns.q.out |   4 +
 .../clientpositive/vector_between_in.q.out      |   4 +
 .../clientpositive/vector_char_mapjoin1.q.out   |   6 +
 .../clientpositive/vector_decimal_10_0.q.out    |   1 +
 .../clientpositive/vector_decimal_3.q.out       |   2 +
 .../clientpositive/vector_decimal_6.q.out       |   2 +
 .../vector_decimal_aggregate.q.out              |   4 +
 .../vector_decimal_expressions.q.out            |   3 +
 .../vector_decimal_math_funcs.q.out             |   4 +
 .../clientpositive/vector_grouping_sets.q.out   |  29 +++++
 .../vector_interval_mapjoin.q.out               |  26 ++++
 .../results/clientpositive/vector_join30.q.out  |   2 +
 .../clientpositive/vector_join_filters.q.out    |   2 +
 .../clientpositive/vector_join_nulls.q.out      |   2 +
 .../vector_leftsemi_mapjoin.q.out               |   6 +
 .../clientpositive/vector_multi_insert.q.out    |   1 +
 .../clientpositive/vector_nullsafe_join.q.out   |   2 +
 .../clientpositive/vector_outer_join1.q.out     |  60 ++++++++++
 .../clientpositive/vector_outer_join2.q.out     |  60 ++++++++++
 .../clientpositive/vector_outer_join3.q.out     |  60 ++++++++++
 .../clientpositive/vector_outer_join4.q.out     |  60 ++++++++++
 .../clientpositive/vector_outer_join5.q.out     |   8 ++
 .../clientpositive/vector_outer_join6.q.out     |  12 ++
 .../vector_partitioned_date_time.q.out          |   6 +
 .../vector_reduce_groupby_decimal.q.out         |   4 +
 .../vector_varchar_mapjoin1.q.out               |   6 +
 .../vectorization_decimal_date.q.out            |   4 +
 .../vectorization_short_regress.q.out           |  12 ++
 .../clientpositive/windowing_navfn.q.out        |   2 +
 .../clientpositive/windowing_streaming.q.out    |   6 +
 223 files changed, 2079 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
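
For readers skimming the diffstat: the mechanical bulk of this change is the new
"POSTHOOK: Lineage:" lines in the .q.out files, which the PostExecutePrinter test
hook emits once CTAS statements start populating lineage. As a sketch of what the
newly exposed information looks like to a consumer, the following post-execution
hook prints one line per (target column, dependency) pair. It assumes the
ExecuteWithHookContext and LineageInfo APIs as they exist on this branch, and the
class name CtasLineagePrinter is made up for illustration.

import java.util.Map;

import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.LineageInfo;
import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency;
import org.apache.hadoop.hive.ql.hooks.LineageInfo.DependencyKey;

public class CtasLineagePrinter implements ExecuteWithHookContext {
  @Override
  public void run(HookContext hookContext) throws Exception {
    // Lineage collected while compiling/running the statement, if any.
    LineageInfo linfo = hookContext.getLinfo();
    if (linfo == null) {
      return;   // nothing recorded for this statement
    }
    // With this patch, a CTAS such as "create table t as select key, value from src"
    // contributes one entry per target column, e.g. t.key <- SIMPLE [src.key].
    for (Map.Entry<DependencyKey, Dependency> e : linfo.entrySet()) {
      DependencyKey target = e.getKey();   // destination container + column
      Dependency dep = e.getValue();       // SIMPLE/EXPRESSION plus base columns
      System.out.println("Lineage: " + target.getFieldSchema().getName()
          + " " + dep.getType() + " " + dep.getBaseCols());
    }
  }
}

Such a hook would be registered through hive.exec.post.hooks (fully qualified
class name, on the classpath); before this patch it would see no entries for a
plain CTAS query.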


http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index c51cfd6..2a64cfa 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.exec.ArchiveUtils.PartSpecInfo;
 import org.apache.hadoop.hive.ql.exec.tez.TezTask;
+import org.apache.hadoop.hive.ql.hooks.LineageInfo.DataContainer;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.AcidUtils;
@@ -3919,6 +3920,13 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
       }
     } else {
       db.createTable(tbl, crtTbl.getIfNotExists());
+      if (crtTbl.isCTAS()) {
+        Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
+        DataContainer dc = new DataContainer(createdTable.getTTable());
+        SessionState.get().getLineageState().setLineage(
+                createdTable.getPath(), dc, createdTable.getCols()
+        );
+      }
     }
     work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
     return 0;
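
Note the symmetry with the compile-time half of the patch below: the lineage
recorded here is keyed by the created table's final path (createdTable.getPath()),
and SemanticAnalyzer maps that same directory to the terminal FileSinkOperator.
Matching on the path is what ties the column dependencies collected during
optimization to a table that only exists once this DDL step has run.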

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 9ab091d..633c212 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6910,6 +6910,19 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     if (ltd != null && SessionState.get() != null) {
       SessionState.get().getLineageState()
           .mapDirToFop(ltd.getSourcePath(), (FileSinkOperator) output);
+    } else if (SessionState.get() != null && SessionState.get().getCommandType().equals(HiveOperation.CREATETABLE_AS_SELECT.getOperationName())) {
+
+      Path tlocation = null;
+      String tName = Utilities.getDbTableName(tableDesc.getTableName())[1];
+      try {
+        Warehouse wh = new Warehouse(conf);
+        tlocation = wh.getTablePath(db.getDatabase(tableDesc.getDatabaseName()), tName);
+      } catch (MetaException|HiveException e) {
+        throw new SemanticException(e);
+      }
+
+      SessionState.get().getLineageState()
+              .mapDirToFop(tlocation, (FileSinkOperator) output);
     }
 
     if (LOG.isDebugEnabled()) {
@@ -11341,7 +11354,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
           rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(),
           storageFormat.getOutputFormat(), location, storageFormat.getSerde(),
           storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
-          skewedColNames, skewedValues);
+          skewedColNames, skewedValues, true);
       tableDesc.setMaterialization(isMaterialization);
       tableDesc.setStoredAsSubDirectories(storedAsDirs);
       tableDesc.setNullFormat(rowFormatParams.nullFormat);
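
The else branch covers the fact that, at analysis time, a CTAS target has no
LoadTableDesc (ltd is null) and the table itself does not exist yet, so its
eventual directory is derived from the warehouse layout (database location plus
table name) via Warehouse.getTablePath and registered against the
FileSinkOperator; DDLTask above then records the lineage under the same path once
the table has been created.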

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 8b2ac3b..03b4d8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -87,6 +87,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
   boolean isTemporary = false;
   private boolean isMaterialization = false;
   private boolean replaceMode = false;
+  private boolean isCTAS = false;
 
   public CreateTableDesc() {
   }
@@ -111,6 +112,27 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
     this.databaseName = databaseName;
   }
 
+  public CreateTableDesc(String databaseName, String tableName, boolean isExternal, boolean isTemporary,
+                         List<FieldSchema> cols, List<FieldSchema> partCols,
+                         List<String> bucketCols, List<Order> sortCols, int numBuckets,
+                         String fieldDelim, String fieldEscape, String collItemDelim,
+                         String mapKeyDelim, String lineDelim, String comment, String inputFormat,
+                         String outputFormat, String location, String serName,
+                         String storageHandler,
+                         Map<String, String> serdeProps,
+                         Map<String, String> tblProps,
+                         boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues,
+                         boolean isCTAS) {
+    this(databaseName, tableName, isExternal, isTemporary, cols, partCols,
+            bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
+            collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
+            outputFormat, location, serName, storageHandler, serdeProps,
+            tblProps, ifNotExists, skewedColNames, skewedColValues);
+    this.isCTAS = isCTAS;
+
+  }
+
+
   public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary,
       List<FieldSchema> cols, List<FieldSchema> partCols,
       List<String> bucketCols, List<Order> sortCols, int numBuckets,
@@ -589,6 +611,10 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
     return replaceMode;
   }
 
+  public boolean isCTAS() {
+    return isCTAS;
+  }
+
   public Table toTable(HiveConf conf) throws HiveException {
     String databaseName = getDatabaseName();
     String tableName = getTableName();
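
The new constructor simply delegates to the existing one and tags the descriptor;
because isCTAS defaults to false, the many existing CreateTableDesc call sites
keep their behavior, and only the CREATE TABLE AS SELECT path in SemanticAnalyzer
passes true.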

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_part.q.out b/ql/src/test/results/clientnegative/authorization_part.q.out
index 6238782..7a31214 100644
--- a/ql/src/test/results/clientnegative/authorization_part.q.out
+++ b/ql/src/test/results/clientnegative/authorization_part.q.out
@@ -28,6 +28,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_auth
+POSTHOOK: Lineage: src_auth.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_auth.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: grant Create on table authorization_part_fail to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@authorization_part_fail

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_1.q.out b/ql/src/test/results/clientnegative/authorization_view_1.q.out
index 2feb0d8..72b134f 100644
--- a/ql/src/test/results/clientnegative/authorization_view_1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_1.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_2.q.out b/ql/src/test/results/clientnegative/authorization_view_2.q.out
index 0f8bd13..e905299 100644
--- a/ql/src/test/results/clientnegative/authorization_view_2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_2.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_3.q.out b/ql/src/test/results/clientnegative/authorization_view_3.q.out
index e6d2352..3e8f98d 100644
--- a/ql/src/test/results/clientnegative/authorization_view_3.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_3.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_4.q.out b/ql/src/test/results/clientnegative/authorization_view_4.q.out
index 371d407..dbcc769 100644
--- a/ql/src/test/results/clientnegative/authorization_view_4.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_4.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out
index 2feb0d8..72b134f 100644
--- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out
index 0f8bd13..e905299 100644
--- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out
index e6d2352..3e8f98d 100644
--- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out
index 371d407..dbcc769 100644
--- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientnegative/updateBasicStats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/updateBasicStats.q.out b/ql/src/test/results/clientnegative/updateBasicStats.q.out
index 3c4fe39..89196e0 100644
--- a/ql/src/test/results/clientnegative/updateBasicStats.q.out
+++ b/ql/src/test/results/clientnegative/updateBasicStats.q.out
@@ -8,4 +8,6 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 FAILED: SemanticException AlterTable numRows failed with value NaN

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
index 1410d5e..eda49ed 100644
--- a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
+++ b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out
@@ -172,6 +172,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@allcolref
+POSTHOOK: Lineage: allcolref.c0 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select explode(*) as x from allcolref limit 10
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select explode(*) as x from allcolref limit 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out
index d35bf40..79489ad 100644
--- a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out
+++ b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_auth_tmp
+POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/alter_table_update_status.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
index e30946f..6bb6e8e 100644
--- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out
+++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_stat
+POSTHOOK: Lineage: src_stat.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_stat.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table src_stat_int (
   key         double,
   value       string

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/annotate_stats_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
index cec3a1f..6db4ded 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out
@@ -299,6 +299,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tmp
+POSTHOOK: Lineage: tmp._c0 SIMPLE []
 PREHOOK: query: explain create table tmp as select 1
 PREHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: query: explain create table tmp as select 1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_1.q.out b/ql/src/test/results/clientpositive/authorization_1.q.out
index f9f1b34..5d73485 100644
--- a/ql/src/test/results/clientpositive/authorization_1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_1.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: --table grant to user
 
 grant select on table src_autho_test to user hive_test_user

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_2.q.out b/ql/src/test/results/clientpositive/authorization_2.q.out
index e92763c..449cd02 100644
--- a/ql/src/test/results/clientpositive/authorization_2.q.out
+++ b/ql/src/test/results/clientpositive/authorization_2.q.out
@@ -20,6 +20,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_auth_tmp
+POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LEVEL_PRIVILEGE"="TRUE")
 PREHOOK: type: ALTERTABLE_PROPERTIES
 PREHOOK: Input: default@authorization_part

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_3.q.out b/ql/src/test/results/clientpositive/authorization_3.q.out
index 2b53233..14c1466 100644
--- a/ql/src/test/results/clientpositive/authorization_3.q.out
+++ b/ql/src/test/results/clientpositive/authorization_3.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: grant drop on table src_autho_test to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_4.q.out b/ql/src/test/results/clientpositive/authorization_4.q.out
index 67a30fd..7bf00cd 100644
--- a/ql/src/test/results/clientpositive/authorization_4.q.out
+++ b/ql/src/test/results/clientpositive/authorization_4.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: grant All on table src_autho_test to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_6.q.out b/ql/src/test/results/clientpositive/authorization_6.q.out
index 0341094..bfe9f76 100644
--- a/ql/src/test/results/clientpositive/authorization_6.q.out
+++ b/ql/src/test/results/clientpositive/authorization_6.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_auth_tmp
+POSTHOOK: Lineage: src_auth_tmp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_auth_tmp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out b/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out
index c87862d..6b2c74c 100644
--- a/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out
+++ b/ql/src/test/results/clientpositive/authorization_create_temp_table.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@authorization_create_temp_table_1
+POSTHOOK: Lineage: authorization_create_temp_table_1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: authorization_create_temp_table_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: grant select on authorization_create_temp_table_1 to user user1
 PREHOOK: type: GRANT_PRIVILEGE
 PREHOOK: Output: default@authorization_create_temp_table_1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_view_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_1.q.out b/ql/src/test/results/clientpositive/authorization_view_1.q.out
index 0703c4f..01ce416 100644
--- a/ql/src/test/results/clientpositive/authorization_view_1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_view_1.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out
index 0341f0b..8fe92dd 100644
--- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out
+++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create view v as select * from src_autho_test
 PREHOOK: type: CREATEVIEW
 PREHOOK: Input: default@src_autho_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/autogen_colalias.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autogen_colalias.q.out b/ql/src/test/results/clientpositive/autogen_colalias.q.out
index 896a468..ec049a7 100644
--- a/ql/src/test/results/clientpositive/autogen_colalias.q.out
+++ b/ql/src/test/results/clientpositive/autogen_colalias.q.out
@@ -20,6 +20,16 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest_grouped_old1
+POSTHOOK: Lineage: dest_grouped_old1.c0 SIMPLE []
+POSTHOOK: Lineage: dest_grouped_old1.c3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.c5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.c6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.c7 SIMPLE []
+POSTHOOK: Lineage: dest_grouped_old1.c8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.c9 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_old1.zz SIMPLE []
 PREHOOK: query: describe dest_grouped_old1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@dest_grouped_old1
@@ -46,6 +56,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest_grouped_old2
+POSTHOOK: Lineage: dest_grouped_old2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: describe dest_grouped_old2
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@dest_grouped_old2
@@ -69,6 +80,16 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest_grouped_new1
+POSTHOOK: Lineage: dest_grouped_new1.column_0 SIMPLE []
+POSTHOOK: Lineage: dest_grouped_new1.column_2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.count_sin_src_value_6 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.count_src_value_4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.current_timestamp_7 SIMPLE []
+POSTHOOK: Lineage: dest_grouped_new1.if_src_key_10_src_ke_9 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.sin_count_src_value_5 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.sum_if_value_10_valu_8 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.test_max_length_src__3 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: dest_grouped_new1.zz SIMPLE []
 PREHOOK: query: describe dest_grouped_new1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@dest_grouped_new1
@@ -95,6 +116,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dest_grouped_new2
+POSTHOOK: Lineage: dest_grouped_new2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: describe dest_grouped_new2
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@dest_grouped_new2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
index e7094e8..7542882 100644
--- a/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
+++ b/ql/src/test/results/clientpositive/cbo_SortUnionTransposeRule.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
 select key from s a
 union all

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
index 6697b2e..89f1746 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src order by key
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Map Join MAPJOIN[8][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cbo_rp_unionDistinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cbo_rp_unionDistinct_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_unionDistinct_2.q.out
index 6d59369..304d74f 100644
--- a/ql/src/test/results/clientpositive/cbo_rp_unionDistinct_2.q.out
+++ b/ql/src/test/results/clientpositive/cbo_rp_unionDistinct_2.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u1
+POSTHOOK: Lineage: u1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u2 as select key, value from src order by key limit 3
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u2
+POSTHOOK: Lineage: u2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u3 as select key, value from src order by key desc limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u3
+POSTHOOK: Lineage: u3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from u1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/char_nested_types.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/char_nested_types.q.out b/ql/src/test/results/clientpositive/char_nested_types.q.out
index 07bf3c3..f989132 100644
--- a/ql/src/test/results/clientpositive/char_nested_types.q.out
+++ b/ql/src/test/results/clientpositive/char_nested_types.q.out
@@ -173,6 +173,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_nested_struct
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_nested_cta
+POSTHOOK: Lineage: char_nested_cta.c1 SIMPLE [(char_nested_struct)char_nested_struct.FieldSchema(name:c1, type:struct<a:int,b:char(20),c:string>, comment:null), ]
 PREHOOK: query: describe char_nested_cta
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@char_nested_cta
@@ -203,6 +204,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_nested_struct
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_nested_view
+POSTHOOK: Lineage: char_nested_view.c1 SIMPLE [(char_nested_struct)char_nested_struct.FieldSchema(name:c1, type:struct<a:int,b:char(20),c:string>, comment:null), ]
 PREHOOK: query: describe char_nested_view
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@char_nested_view

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
index 99c8fac..d567ec8 100644
--- a/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
+++ b/ql/src/test/results/clientpositive/colstats_all_nulls.q.out
@@ -24,6 +24,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src_null
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@all_nulls
+POSTHOOK: Lineage: all_nulls.a SIMPLE [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
+POSTHOOK: Lineage: all_nulls.b EXPRESSION [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
+POSTHOOK: Lineage: all_nulls.c EXPRESSION [(src_null)src_null.FieldSchema(name:a, type:bigint, comment:null), ]
 PREHOOK: query: analyze table all_nulls compute statistics for columns
 PREHOOK: type: QUERY
 PREHOOK: Input: default@all_nulls

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/create_default_prop.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/create_default_prop.q.out b/ql/src/test/results/clientpositive/create_default_prop.q.out
index 4b267ae..6d002c9 100644
--- a/ql/src/test/results/clientpositive/create_default_prop.q.out
+++ b/ql/src/test/results/clientpositive/create_default_prop.q.out
@@ -42,6 +42,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@table_p1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@table_p3
+POSTHOOK: Lineage: table_p3.a SIMPLE [(table_p1)table_p1.FieldSchema(name:a, type:string, comment:null), ]
 PREHOOK: query: DESC EXTENDED table_p3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@table_p3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out
index f9a8a0f..4feb798 100644
--- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
index a8bac78..f34f2b5 100644
--- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src order by key
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-3:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
index 06003ee..c9676de 100644
--- a/ql/src/test/results/clientpositive/ctas.q.out
+++ b/ql/src/test/results/clientpositive/ctas.q.out
@@ -116,6 +116,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_CTAS1
+POSTHOOK: Lineage: nzhang_ctas1.k SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_CTAS1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas1
@@ -265,6 +267,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas2
+POSTHOOK: Lineage: nzhang_ctas2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas2
@@ -414,6 +418,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas3
+POSTHOOK: Lineage: nzhang_ctas3.conb EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas3.half_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas3
@@ -628,6 +634,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas4
+POSTHOOK: Lineage: nzhang_ctas4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas4
@@ -938,6 +946,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas5
+POSTHOOK: Lineage: nzhang_ctas5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -966,3 +976,5 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@nzhang_ctas6
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas7
+POSTHOOK: Lineage: nzhang_ctas7.key SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_ctas7.to SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:to, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_char.q.out b/ql/src/test/results/clientpositive/ctas_char.q.out
index e8d5c41..c5dbe56 100644
--- a/ql/src/test/results/clientpositive/ctas_char.q.out
+++ b/ql/src/test/results/clientpositive/ctas_char.q.out
@@ -42,6 +42,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@ctas_char_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ctas_char_2
+POSTHOOK: Lineage: ctas_char_2.key SIMPLE [(ctas_char_1)ctas_char_1.FieldSchema(name:key, type:char(10), comment:null), ]
+POSTHOOK: Lineage: ctas_char_2.value SIMPLE [(ctas_char_1)ctas_char_1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- view with char column
 create view ctas_char_3 as select key, value from ctas_char_2
 PREHOOK: type: CREATEVIEW

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas_colname.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
index 7c86c66..2622676 100644
--- a/ql/src/test/results/clientpositive/ctas_colname.q.out
+++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
@@ -88,6 +88,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@summary
+POSTHOOK: Lineage: summary.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: summary.c2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: summary.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: summary.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted summary
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@summary
@@ -246,6 +250,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x4
+POSTHOOK: Lineage: x4.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x4.rr SCRIPT [(src1)src1.FieldSchema(name:key, type:string, comment:default), (src1)src1.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: x4.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted x4
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x4
@@ -434,6 +441,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x5
+POSTHOOK: Lineage: x5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x5.lead1 SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: x5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted x5
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x5
@@ -604,6 +614,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x6
+POSTHOOK: Lineage: x6._c1 EXPRESSION [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x6.key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x6.value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted x6
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x6
@@ -752,6 +765,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x7
+POSTHOOK: Lineage: x7._c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: x7._col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x7._col1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted x7
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x7
@@ -1183,6 +1199,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x8
+POSTHOOK: Lineage: x8._c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: x8._col0 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: x8._col1 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: describe formatted x8
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x8
@@ -1317,6 +1336,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@x9
+POSTHOOK: Lineage: x9._c0 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: x9.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: describe formatted x9
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@x9

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_date.q.out b/ql/src/test/results/clientpositive/ctas_date.q.out
index 884e649..a441f8d 100644
--- a/ql/src/test/results/clientpositive/ctas_date.q.out
+++ b/ql/src/test/results/clientpositive/ctas_date.q.out
@@ -47,6 +47,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@ctas_date_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ctas_date_2
+POSTHOOK: Lineage: ctas_date_2.c3 SIMPLE []
+POSTHOOK: Lineage: ctas_date_2.dd SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:dd, type:date, comment:null), ]
+POSTHOOK: Lineage: ctas_date_2.key SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: ctas_date_2.value SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- view with date column
 create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01'
 PREHOOK: type: CREATEVIEW

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
index 35f99e6..39d8bcf 100644
--- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
+++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
@@ -124,6 +124,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:db1
 POSTHOOK: Output: db1@table_db1
+POSTHOOK: Lineage: table_db1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: table_db1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: DESCRIBE FORMATTED table_db1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: db1@table_db1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/ctas_varchar.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ctas_varchar.q.out b/ql/src/test/results/clientpositive/ctas_varchar.q.out
index 9ceb64a..9715e09 100644
--- a/ql/src/test/results/clientpositive/ctas_varchar.q.out
+++ b/ql/src/test/results/clientpositive/ctas_varchar.q.out
@@ -42,6 +42,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@ctas_varchar_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ctas_varchar_2
+POSTHOOK: Lineage: ctas_varchar_2.key SIMPLE [(ctas_varchar_1)ctas_varchar_1.FieldSchema(name:key, type:varchar(10), comment:null), ]
+POSTHOOK: Lineage: ctas_varchar_2.value SIMPLE [(ctas_varchar_1)ctas_varchar_1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- view with varchar column
 create view ctas_varchar_3 as select key, value from ctas_varchar_2
 PREHOOK: type: CREATEVIEW

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cte_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out
index d6923ba..4b0bc71 100644
--- a/ql/src/test/results/clientpositive/cte_2.q.out
+++ b/ql/src/test/results/clientpositive/cte_2.q.out
@@ -94,6 +94,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s2
+POSTHOOK: Lineage: s2.key SIMPLE []
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/cte_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/cte_4.q.out b/ql/src/test/results/clientpositive/cte_4.q.out
index d560d74..6385abe 100644
--- a/ql/src/test/results/clientpositive/cte_4.q.out
+++ b/ql/src/test/results/clientpositive/cte_4.q.out
@@ -124,6 +124,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@q1
 POSTHOOK: Output: default@s2
 #### A masked pattern was here ####
+POSTHOOK: Lineage: s2.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/database.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/database.q.out b/ql/src/test/results/clientpositive/database.q.out
index 02eb3d3..8c2653c 100644
--- a/ql/src/test/results/clientpositive/database.q.out
+++ b/ql/src/test/results/clientpositive/database.q.out
@@ -1324,6 +1324,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:db2
 POSTHOOK: Output: db2@conflict_name
+POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- CREATE foreign table
 CREATE TABLE db1.conflict_name AS
 SELECT value FROM db1.src WHERE key = 8
@@ -1338,6 +1339,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: db1@src
 POSTHOOK: Output: database:db1
 POSTHOOK: Output: db1@conflict_name
+POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- query tables with the same names in different DBs
 SELECT * FROM (
   SELECT value FROM db1.conflict_name

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
index dadd9f1..f9adbc9 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_ddl1.q.out
@@ -66,6 +66,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T3
+POSTHOOK: Lineage: t3.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: t3.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ]
 PREHOOK: query: create table T4 (key char(10), val decimal(5,2), b int)
     partitioned by (ds string)
     clustered by (b) into 10 buckets

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out
index e1ce600..2c2c97a 100644
--- a/ql/src/test/results/clientpositive/decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/decimal_6.q.out
@@ -136,6 +136,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_6_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_6_3
+POSTHOOK: Lineage: decimal_6_3.k EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:key, type:decimal(10,5), comment:null), ]
+POSTHOOK: Lineage: decimal_6_3.v EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: desc DECIMAL_6_3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@decimal_6_3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/decimal_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_join2.q.out b/ql/src/test/results/clientpositive/decimal_join2.q.out
index 47e7ceb..3a0b327 100644
--- a/ql/src/test/results/clientpositive/decimal_join2.q.out
+++ b/ql/src/test/results/clientpositive/decimal_join2.q.out
@@ -38,6 +38,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_3_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_3
+POSTHOOK: Lineage: decimal_3.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: decimal_3.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN
 SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/decimal_serde.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/decimal_serde.q.out b/ql/src/test/results/clientpositive/decimal_serde.q.out
index 520f85f..e0f5c74 100644
--- a/ql/src/test/results/clientpositive/decimal_serde.q.out
+++ b/ql/src/test/results/clientpositive/decimal_serde.q.out
@@ -96,6 +96,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_text
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_RC
+POSTHOOK: Lineage: decimal_rc.key SIMPLE [(decimal_text)decimal_text.FieldSchema(name:key, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: decimal_rc.value SIMPLE [(decimal_text)decimal_text.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: describe formatted DECIMAL_RC
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@decimal_rc
@@ -147,6 +149,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_rc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_LAZY_COL
+POSTHOOK: Lineage: decimal_lazy_col.key SIMPLE [(decimal_rc)decimal_rc.FieldSchema(name:key, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: decimal_lazy_col.value SIMPLE [(decimal_rc)decimal_rc.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: describe formatted DECIMAL_LAZY_COL
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@decimal_lazy_col
@@ -204,6 +208,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_lazy_col
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_SEQUENCE
+POSTHOOK: Lineage: decimal_sequence.key SIMPLE [(decimal_lazy_col)decimal_lazy_col.FieldSchema(name:key, type:decimal(10,0), comment:null), ]
+POSTHOOK: Lineage: decimal_sequence.value SIMPLE [(decimal_lazy_col)decimal_lazy_col.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: SELECT * FROM DECIMAL_SEQUENCE ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_sequence

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/empty_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/empty_join.q.out b/ql/src/test/results/clientpositive/empty_join.q.out
index a1aad9f..9a47c1f 100644
--- a/ql/src/test/results/clientpositive/empty_join.q.out
+++ b/ql/src/test/results/clientpositive/empty_join.q.out
@@ -12,6 +12,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test_1
+POSTHOOK: Lineage: test_1.id SIMPLE []
 PREHOOK: query: DROP TABLE IF EXISTS test_2
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS test_2
@@ -38,6 +39,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test_3
+POSTHOOK: Lineage: test_3.id SIMPLE []
 PREHOOK: query: explain
 SELECT t1.id, t2.id, t3.id
 FROM test_1 t1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/explain_ddl.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out
index 3371e7e..fa73d99 100644
--- a/ql/src/test/results/clientpositive/explain_ddl.q.out
+++ b/ql/src/test/results/clientpositive/explain_ddl.q.out
@@ -35,6 +35,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@M1
+POSTHOOK: Lineage: m1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: m1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select count(*) from M1 where key > 0
 PREHOOK: type: QUERY
 PREHOOK: Input: default@m1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/global_limit.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/global_limit.q.out b/ql/src/test/results/clientpositive/global_limit.q.out
index 7da20d5..0c76f3f 100644
--- a/ql/src/test/results/clientpositive/global_limit.q.out
+++ b/ql/src/test/results/clientpositive/global_limit.q.out
@@ -58,6 +58,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@gl_src1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@gl_tgt
+POSTHOOK: Lineage: gl_tgt.key SIMPLE [(gl_src1)gl_src1.FieldSchema(name:key, type:int, comment:null), ]
 PREHOOK: query: select * from gl_tgt ORDER BY key ASC
 PREHOOK: type: QUERY
 PREHOOK: Input: default@gl_tgt

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
index 49ab3ae..dd2ea75 100644
--- a/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
+++ b/ql/src/test/results/clientpositive/groupby_duplicate_key.q.out
@@ -157,6 +157,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dummy
+POSTHOOK: Lineage: dummy.dummy1 SIMPLE []
+POSTHOOK: Lineage: dummy.dummy2 SIMPLE []
+POSTHOOK: Lineage: dummy.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select key,dummy1,dummy2 from dummy
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dummy

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/input46.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/input46.q.out b/ql/src/test/results/clientpositive/input46.q.out
index 4240566..e07d63c 100644
--- a/ql/src/test/results/clientpositive/input46.q.out
+++ b/ql/src/test/results/clientpositive/input46.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:table_in_database_creation
 POSTHOOK: Output: table_in_database_creation@test1
+POSTHOOK: Lineage: test1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table `table_in_database_creation`.`test2` as select * from src limit 1
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -24,6 +26,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:table_in_database_creation
 POSTHOOK: Output: table_in_database_creation@test2
+POSTHOOK: Lineage: test2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: test2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table table_in_database_creation.test3 (a string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:table_in_database_creation

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/insert0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/insert0.q.out b/ql/src/test/results/clientpositive/insert0.q.out
index e83bae1..e8a4884 100644
--- a/ql/src/test/results/clientpositive/insert0.q.out
+++ b/ql/src/test/results/clientpositive/insert0.q.out
@@ -94,6 +94,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ctas_table
+POSTHOOK: Lineage: ctas_table.foo EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: ctas_table.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: describe extended ctas_table
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@ctas_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/join41.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join41.q.out b/ql/src/test/results/clientpositive/join41.q.out
index b33652d..2ad59f5 100644
--- a/ql/src/test/results/clientpositive/join41.q.out
+++ b/ql/src/test/results/clientpositive/join41.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s1
+POSTHOOK: Lineage: s1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
 SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/join42.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out
index e52f1fc..542408f 100644
--- a/ql/src/test/results/clientpositive/join42.q.out
+++ b/ql/src/test/results/clientpositive/join42.q.out
@@ -8,6 +8,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@L
+POSTHOOK: Lineage: l.id SIMPLE []
 PREHOOK: query: create table LA as select 4436 loan_id, 4748 aid, 4415 pi_id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -18,6 +19,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@LA
+POSTHOOK: Lineage: la.aid SIMPLE []
+POSTHOOK: Lineage: la.loan_id SIMPLE []
+POSTHOOK: Lineage: la.pi_id SIMPLE []
 PREHOOK: query: create table FR as select 4436 loan_id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -28,6 +32,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@FR
+POSTHOOK: Lineage: fr.loan_id SIMPLE []
 PREHOOK: query: create table A as select 4748 id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -38,6 +43,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.id SIMPLE []
 PREHOOK: query: create table PI as select 4415 id
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -48,6 +54,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@PI
+POSTHOOK: Lineage: pi.id SIMPLE []
 PREHOOK: query: create table acct as select 4748 aid, 10 acc_n, 122 brn
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: _dummy_database@_dummy_table
@@ -58,6 +65,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acct
+POSTHOOK: Lineage: acct.acc_n SIMPLE []
+POSTHOOK: Lineage: acct.aid SIMPLE []
+POSTHOOK: Lineage: acct.brn SIMPLE []
 PREHOOK: query: insert into table acct values(4748, null, null)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/join_filters_overlap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
index ca1e085..dede6b7 100644
--- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@a
+POSTHOOK: Lineage: a.key SIMPLE []
+POSTHOOK: Lineage: a.value SCRIPT []
 PREHOOK: query: -- overlap on a
 explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/lateral_view_outer.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/lateral_view_outer.q.out b/ql/src/test/results/clientpositive/lateral_view_outer.q.out
index bfb1650..994945a 100644
--- a/ql/src/test/results/clientpositive/lateral_view_outer.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_outer.q.out
@@ -192,6 +192,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@array_valued
+POSTHOOK: Lineage: array_valued.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: array_valued.value EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
 select * from array_valued LATERAL VIEW OUTER explode(value) C AS a limit 10
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/cte_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_2.q.out b/ql/src/test/results/clientpositive/llap/cte_2.q.out
index 23f8ec6..017bded 100644
--- a/ql/src/test/results/clientpositive/llap/cte_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_2.q.out
@@ -94,6 +94,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s2
+POSTHOOK: Lineage: s2.key SIMPLE []
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2


[28/51] [abbrv] hive git commit: HIVE-13237: Select parquet struct field with upper case throws NPE (Jimmy, reviewed by Xuefu)

Posted by jd...@apache.org.
HIVE-13237: Select parquet struct field with upper case throws NPE (Jimmy, reviewed by Xuefu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6502b5e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6502b5e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6502b5e

Branch: refs/heads/llap
Commit: b6502b5ea35f316ed10e71d845a7b5c6ab4ad151
Parents: 62bae5e
Author: Jimmy Xiang <jx...@apache.org>
Authored: Tue Mar 8 17:47:38 2016 -0800
Committer: Jimmy Xiang <jx...@apache.org>
Committed: Fri Mar 11 07:33:56 2016 -0800

----------------------------------------------------------------------
 .../hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b6502b5e/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
index 7873c99..5f852d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java
@@ -63,7 +63,7 @@ public class ArrayWritableObjectInspector extends SettableStructObjectInspector
 
       final StructFieldImpl field = new StructFieldImpl(name, getObjectInspector(fieldInfo), i);
       fields.add(field);
-      fieldsByName.put(name, field);
+      fieldsByName.put(name.toLowerCase(), field);
     }
   }
 
@@ -158,7 +158,7 @@ public class ArrayWritableObjectInspector extends SettableStructObjectInspector
 
   @Override
   public StructField getStructFieldRef(final String name) {
-    return fieldsByName.get(name);
+    return fieldsByName.get(name.toLowerCase());
   }
 
   @Override

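The two-line patch above makes struct-field lookup case-insensitive by canonicalizing the name at both ends: it is lowercased when stored in fieldsByName and lowercased again in getStructFieldRef, so a query that spells the field with upper case no longer misses the map entry and dereferences null. Here is a standalone sketch of the same pattern using plain JDK types; FieldRegistry and Field are placeholder names invented for this sketch, not Hive classes.

    import java.util.HashMap;
    import java.util.Locale;
    import java.util.Map;

    // Case-insensitive field registry: lowercase at insert and at lookup,
    // mirroring the fix to ArrayWritableObjectInspector above.
    class FieldRegistry {
      static final class Field {
        final String name;
        Field(String name) { this.name = name; }
      }

      private final Map<String, Field> fieldsByName = new HashMap<>();

      void add(Field field) {
        // Canonicalize once when the field is registered...
        fieldsByName.put(field.name.toLowerCase(Locale.ROOT), field);
      }

      Field get(String name) {
        // ...and again on lookup, so "Address", "ADDRESS" and "address" all match.
        return fieldsByName.get(name.toLowerCase(Locale.ROOT));
      }

      public static void main(String[] args) {
        FieldRegistry r = new FieldRegistry();
        r.add(new Field("Address"));
        System.out.println(r.get("ADDRESS").name); // prints "Address" instead of hitting a null entry
      }
    }

One small difference: the patch calls toLowerCase() with the default locale, while the sketch passes Locale.ROOT, which sidesteps locale-sensitive casing (the Turkish dotless-i problem) when field names contain 'I' or 'i'.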

[19/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/cte_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/cte_4.q.out b/ql/src/test/results/clientpositive/llap/cte_4.q.out
index d560d74..6385abe 100644
--- a/ql/src/test/results/clientpositive/llap/cte_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/cte_4.q.out
@@ -124,6 +124,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@q1
 POSTHOOK: Output: default@s2
 #### A masked pattern was here ####
+POSTHOOK: Lineage: s2.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index 9f38717..ace2960 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -133,6 +133,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date
+POSTHOOK: Lineage: srcpart_date.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -151,6 +153,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_hour
+POSTHOOK: Lineage: srcpart_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -169,6 +173,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date_hour
+POSTHOOK: Lineage: srcpart_date_hour.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -187,6 +195,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_double_hour
+POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: -- single column, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
index f3e9ac7..d611e8b 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -1274,6 +1274,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_mapjoin
+POSTHOOK: Lineage: decimal_mapjoin.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
index ac429bc..947ff71 100644
--- a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
@@ -20,6 +20,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc
+POSTHOOK: Lineage: src_orc.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: src_orc.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: src_orc.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain extended
 select * from src_orc where 1=2
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/llap_udf.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_udf.q.out b/ql/src/test/results/clientpositive/llap/llap_udf.q.out
index 29ed978..a8a9415 100644
--- a/ql/src/test/results/clientpositive/llap/llap_udf.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_udf.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc
+POSTHOOK: Lineage: src_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- Not using GenericUDFTestGetJavaBoolean; that is already registered when tests begin
 
 CREATE TEMPORARY FUNCTION test_udf0 AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFEvaluateNPE'

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llapdecider.q.out b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
index a2d7f2a..2b0e639 100644
--- a/ql/src/test/results/clientpositive/llap/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
@@ -81,6 +81,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc
+POSTHOOK: Lineage: src_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/tez_dml.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dml.q.out b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
index b7a7e86..1487c90 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
@@ -107,6 +107,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tmp_src
+POSTHOOK: Lineage: tmp_src.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT * FROM tmp_src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp_src
@@ -1483,6 +1485,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tmp_src_part
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@empty
+POSTHOOK: Lineage: empty.c SIMPLE [(tmp_src_part)tmp_src_part.FieldSchema(name:c, type:string, comment:null), ]
+POSTHOOK: Lineage: empty.d SIMPLE []
 PREHOOK: query: SELECT * FROM empty
 PREHOOK: type: QUERY
 PREHOOK: Input: default@empty

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
index d3c714c..08d89c4 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_result_complex.q.out
@@ -669,6 +669,22 @@ POSTHOOK: Input: default@ct_events_clean
 POSTHOOK: Input: default@service_request_clean
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ct_events1_test
+POSTHOOK: Lineage: ct_events1_test.ce_create_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_create_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_end_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_end_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_notes SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cmpltyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cmpltyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctevs_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cnctevs_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctmd_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cnctmd_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cntvnst_stts_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cntvnst_stts_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_event_id SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_event_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_mode SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_mode, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_type SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_type, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.notes SIMPLE [(service_request_clean)b.FieldSchema(name:notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.src SIMPLE [(service_request_clean)b.FieldSchema(name:sum_reason_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqct_cds SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqct_cds, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqst_id SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqst_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrtyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:svcrtyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.total_transfers SIMPLE [(ct_events_clean)a.FieldSchema(name:total_transfers, type:int, comment:null), ]
 PREHOOK: query: select * from ct_events1_test
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ct_events1_test
@@ -1893,6 +1909,22 @@ POSTHOOK: Input: default@ct_events_clean
 POSTHOOK: Input: default@service_request_clean
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ct_events1_test
+POSTHOOK: Lineage: ct_events1_test.ce_create_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_create_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_end_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_end_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_notes SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cmpltyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cmpltyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctevs_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cnctevs_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctmd_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cnctmd_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cntvnst_stts_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cntvnst_stts_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_event_id SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_event_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_mode SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_mode, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_type SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_type, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.notes SIMPLE [(service_request_clean)b.FieldSchema(name:notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.src SIMPLE [(service_request_clean)b.FieldSchema(name:sum_reason_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqct_cds SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqct_cds, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqst_id SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqst_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrtyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:svcrtyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.total_transfers SIMPLE [(ct_events_clean)a.FieldSchema(name:total_transfers, type:int, comment:null), ]
 PREHOOK: query: select * from ct_events1_test
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ct_events1_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/tez_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union.q.out b/ql/src/test/results/clientpositive/llap/tez_union.q.out
index f42fae1..4388bfc 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union.q.out
@@ -112,6 +112,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.key EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.value EXPRESSION [(src)s1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by key, value limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -308,6 +310,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.cnt EXPRESSION [(src)src.null, ]
 PREHOOK: query: select * from ut order by cnt limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -456,6 +459,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by skey, ukey limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -821,6 +826,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.lkey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by skey, ukey, lkey limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -1079,6 +1087,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by key limit 30
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -1342,6 +1351,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by ukey, skey limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index 35b7544..db21036 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -133,6 +133,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date
+POSTHOOK: Lineage: srcpart_date.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -151,6 +153,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_hour
+POSTHOOK: Lineage: srcpart_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -169,6 +173,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date_hour
+POSTHOOK: Lineage: srcpart_date_hour.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -187,6 +195,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_double_hour
+POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: -- single column, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap_partitioned.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap_partitioned.q.out b/ql/src/test/results/clientpositive/llap_partitioned.q.out
index 25d2f4f..6f259a6 100644
--- a/ql/src/test/results/clientpositive/llap_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/llap_partitioned.q.out
@@ -1964,6 +1964,10 @@ POSTHOOK: Input: default@orc_llap_part@ctinyint=9
 POSTHOOK: Input: default@orc_llap_part@ctinyint=__HIVE_DEFAULT_PARTITION__
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cchar1 SIMPLE [(orc_llap_part)oft.FieldSchema(name:cchar1, type:char(255), comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap_part)oft.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap_part)oft.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cvchar1 SIMPLE [(orc_llap_part)oft.FieldSchema(name:cvchar1, type:varchar(255), comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/llap_uncompressed.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap_uncompressed.q.out b/ql/src/test/results/clientpositive/llap_uncompressed.q.out
index 4f4eb70..d40d63d 100644
--- a/ql/src/test/results/clientpositive/llap_uncompressed.q.out
+++ b/ql/src/test/results/clientpositive/llap_uncompressed.q.out
@@ -117,6 +117,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/merge3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out
index 266abdf..5b581db 100644
--- a/ql/src/test/results/clientpositive/merge3.q.out
+++ b/ql/src/test/results/clientpositive/merge3.q.out
@@ -22,6 +22,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@merge_src
+POSTHOOK: Lineage: merge_src.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: merge_src.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table merge_src_part (key string, value string) partitioned by (ds string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -339,6 +341,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@merge_src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@merge_src2
+POSTHOOK: Lineage: merge_src2.key SIMPLE [(merge_src)merge_src.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: merge_src2.value SIMPLE [(merge_src)merge_src.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from merge_src2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@merge_src2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
index 07a0f3e..7964405 100644
--- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_10
+POSTHOOK: Lineage: src_10.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table src_lv1 (key string, value string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
index 3a35323..2036e63 100644
--- a/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
+++ b/ql/src/test/results/clientpositive/multi_insert_union_src.q.out
@@ -20,6 +20,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src2
+POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table src_multi1 like src
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/multi_join_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/multi_join_union.q.out b/ql/src/test/results/clientpositive/multi_join_union.q.out
index aad66db..b361a1a 100644
--- a/ql/src/test/results/clientpositive/multi_join_union.q.out
+++ b/ql/src/test/results/clientpositive/multi_join_union.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src11
+POSTHOOK: Lineage: src11.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src12 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src12
+POSTHOOK: Lineage: src12.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src13 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src13
+POSTHOOK: Lineage: src13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src14 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -42,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src14
+POSTHOOK: Lineage: src14.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN SELECT * FROM 
 src11 a JOIN
 src12 b ON (a.key = b.key) JOIN

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/nestedvirtual.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nestedvirtual.q.out b/ql/src/test/results/clientpositive/nestedvirtual.q.out
index 05af502..4e3488b 100644
--- a/ql/src/test/results/clientpositive/nestedvirtual.q.out
+++ b/ql/src/test/results/clientpositive/nestedvirtual.q.out
@@ -26,6 +26,9 @@ POSTHOOK: Input: default@pokes
 POSTHOOK: Input: default@pokes2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jssarma_nilzma_bad
+POSTHOOK: Lineage: jssarma_nilzma_bad.filename SIMPLE [(pokes)pokes.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.offset SIMPLE [(pokes)pokes.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.val EXPRESSION [(pokes)pokes.FieldSchema(name:foo, type:int, comment:null), ]
 PREHOOK: query: drop table jssarma_nilzma_bad
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@jssarma_nilzma_bad
@@ -78,6 +81,9 @@ POSTHOOK: Input: default@pokes
 POSTHOOK: Input: default@pokes2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jssarma_nilzma_bad
+POSTHOOK: Lineage: jssarma_nilzma_bad.filename SIMPLE [(pokes)pokes.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.offset SIMPLE [(pokes)pokes.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.val EXPRESSION [(pokes)pokes.FieldSchema(name:foo, type:int, comment:null), ]
 PREHOOK: query: drop table jssarma_nilzma_bad
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@jssarma_nilzma_bad
@@ -130,6 +136,9 @@ POSTHOOK: Input: default@pokes
 POSTHOOK: Input: default@pokes2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jssarma_nilzma_bad
+POSTHOOK: Lineage: jssarma_nilzma_bad.filename SIMPLE [(pokes)pokes.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.offset SIMPLE [(pokes)pokes.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
+POSTHOOK: Lineage: jssarma_nilzma_bad.val EXPRESSION [(pokes)pokes.FieldSchema(name:foo, type:int, comment:null), ]
 PREHOOK: query: drop table jssarma_nilzma_bad
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@jssarma_nilzma_bad

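The nestedvirtual hunks record lineage for Hive's virtual columns: filename and offset trace back to INPUT__FILE__NAME and BLOCK__OFFSET__INSIDE__FILE, which carry empty FieldSchema comments because they are not user-declared columns. A minimal sketch of a CTAS that would yield such entries; pokes and its foo column come from the hunk above, while hash(foo) is only an assumed stand-in for the test's actual val expression, which this diff does not show:

  -- Virtual columns surface in lineage with "comment:" left empty.
  create table jssarma_nilzma_bad as
    select INPUT__FILE__NAME as filename,          -- SIMPLE lineage
           BLOCK__OFFSET__INSIDE__FILE as offset,  -- SIMPLE lineage
           hash(foo) as val                        -- EXPRESSION lineage (assumed expression)
    from pokes;
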
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/non_ascii_literal2.q.out b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
index 7e19143..97a523e 100644
--- a/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
+++ b/ql/src/test/results/clientpositive/non_ascii_literal2.q.out
@@ -10,6 +10,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@non_ascii_literal2
+POSTHOOK: Lineage: non_ascii_literal2.col1 SIMPLE []
+POSTHOOK: Lineage: non_ascii_literal2.col2 SIMPLE []
 PREHOOK: query: select * from non_ascii_literal2
 where col2 = "Абвгде"
 PREHOOK: type: QUERY

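When a CTAS selects only literals, the plan reads from _dummy_database@_dummy_table and the lineage entries carry an empty source list, as in non_ascii_literal2.col1 SIMPLE [] above. A minimal sketch under that reading; the col2 literal is inferred from the filter in the query that follows, and the col1 value is purely hypothetical:

  -- Constant-only select: no base-table columns, so lineage shows "SIMPLE []".
  create table non_ascii_literal2 as
    select "abc" as col1, "Абвгде" as col2;
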
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/nullformatCTAS.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/nullformatCTAS.q.out
index c8628fd..7686419 100644
--- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out
+++ b/ql/src/test/results/clientpositive/nullformatCTAS.q.out
@@ -144,6 +144,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@base_tab
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@null_tab3
+POSTHOOK: Lineage: null_tab3.a SIMPLE [(base_tab)base_tab.FieldSchema(name:a, type:string, comment:null), ]
+POSTHOOK: Lineage: null_tab3.b SIMPLE [(base_tab)base_tab.FieldSchema(name:b, type:string, comment:null), ]
 PREHOOK: query: DESCRIBE EXTENDED null_tab3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@null_tab3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/orc_createas1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_createas1.q.out b/ql/src/test/results/clientpositive/orc_createas1.q.out
index 0e1bb54..506f39d 100644
--- a/ql/src/test/results/clientpositive/orc_createas1.q.out
+++ b/ql/src/test/results/clientpositive/orc_createas1.q.out
@@ -144,6 +144,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_createas1b
+POSTHOOK: Lineage: orc_createas1b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orc_createas1b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN SELECT * FROM orc_createas1b ORDER BY key LIMIT 5
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT * FROM orc_createas1b ORDER BY key LIMIT 5
@@ -314,6 +316,9 @@ POSTHOOK: Input: default@orc_createas1a@ds=1
 POSTHOOK: Input: default@orc_createas1a@ds=2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc_createas1c
+POSTHOOK: Lineage: orc_createas1c.key SIMPLE [(orc_createas1a)orc_createas1a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_createas1c.part EXPRESSION [(orc_createas1a)orc_createas1a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_createas1c.value SIMPLE [(orc_createas1a)orc_createas1a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
     FROM orc_createas1a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/orc_llap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_llap.q.out b/ql/src/test/results/clientpositive/orc_llap.q.out
index 894add3..6fc73b7 100644
--- a/ql/src/test/results/clientpositive/orc_llap.q.out
+++ b/ql/src/test/results/clientpositive/orc_llap.q.out
@@ -293,6 +293,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -363,6 +366,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -433,6 +448,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -518,6 +534,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.c2 EXPRESSION [(orc_llap)orc_llap.null, ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -627,6 +646,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)o1.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)o1.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -734,6 +755,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -804,6 +828,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cbigint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cboolean2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cdouble SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cfloat SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.csmallint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctimestamp2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.ctinyint SIMPLE [(orc_llap)orc_llap.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -874,6 +910,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -959,6 +996,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.c2 EXPRESSION [(orc_llap)orc_llap.null, ]
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)orc_llap.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table
@@ -1068,6 +1108,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@orc_llap
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@llap_temp_table
+POSTHOOK: Lineage: llap_temp_table.cstring1 SIMPLE [(orc_llap)o1.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: llap_temp_table.cstring2 SIMPLE [(orc_llap)o1.FieldSchema(name:cstring2, type:string, comment:null), ]
 PREHOOK: query: select sum(hash(*)) from llap_temp_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@llap_temp_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parallel_orderby.q.out b/ql/src/test/results/clientpositive/parallel_orderby.q.out
index 23b0c10..2991122 100644
--- a/ql/src/test/results/clientpositive/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/parallel_orderby.q.out
@@ -91,6 +91,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@total_ordered
+POSTHOOK: Lineage: total_ordered.key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: total_ordered.value SIMPLE [(src5)src5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: desc formatted total_ordered
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@total_ordered
@@ -204,6 +206,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@total_ordered
+POSTHOOK: Lineage: total_ordered.key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: total_ordered.value SIMPLE [(src5)src5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: desc formatted total_ordered
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@total_ordered

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_ctas.q.out b/ql/src/test/results/clientpositive/parquet_ctas.q.out
index 68f0ead..c9a9c13 100644
--- a/ql/src/test/results/clientpositive/parquet_ctas.q.out
+++ b/ql/src/test/results/clientpositive/parquet_ctas.q.out
@@ -46,6 +46,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_ctas
+POSTHOOK: Lineage: parquet_ctas.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_ctas.value SIMPLE [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: describe parquet_ctas
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@parquet_ctas
@@ -82,6 +84,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_ctas_advanced
+POSTHOOK: Lineage: parquet_ctas_advanced.c0 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_ctas_advanced.c1 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: describe parquet_ctas_advanced
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@parquet_ctas_advanced
@@ -118,6 +122,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_ctas_alias
+POSTHOOK: Lineage: parquet_ctas_alias.mykey EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_ctas_alias.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: describe parquet_ctas_alias
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@parquet_ctas_alias
@@ -154,6 +160,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_ctas_mixed
+POSTHOOK: Lineage: parquet_ctas_mixed.c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_ctas_mixed.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_ctas_mixed.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: describe parquet_ctas_mixed
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@parquet_ctas_mixed

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_join.q.out b/ql/src/test/results/clientpositive/parquet_join.q.out
index 91bda4e..86fb64a 100644
--- a/ql/src/test/results/clientpositive/parquet_join.q.out
+++ b/ql/src/test/results/clientpositive/parquet_join.q.out
@@ -46,6 +46,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable1
+POSTHOOK: Lineage: parquet_jointable1.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1.value SIMPLE [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@staging
@@ -56,6 +58,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable2
+POSTHOOK: Lineage: parquet_jointable2.c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- MR join

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
index 5b4e7b6..825e668 100644
--- a/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/parquet_map_null.q.java1.7.out
@@ -38,6 +38,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@avro_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_table
+POSTHOOK: Lineage: parquet_table.avreau_col_1 SIMPLE [(avro_table)avro_table.FieldSchema(name:avreau_col_1, type:map<string,string>, comment:), ]
 PREHOOK: query: SELECT * FROM parquet_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out b/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out
index 6ce63f7..8d5b8f2 100644
--- a/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out
+++ b/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out
@@ -36,6 +36,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@parquet_map_of_arrays_of_ints
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_map_of_arrays_of_ints_copy
+POSTHOOK: Lineage: parquet_map_of_arrays_of_ints_copy.examples SIMPLE [(parquet_map_of_arrays_of_ints)parquet_map_of_arrays_of_ints.FieldSchema(name:examples, type:map<string,array<int>>, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_map_of_arrays_of_ints_copy
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_map_of_arrays_of_ints_copy

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out b/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out
index 4775d2e..4c26b45 100644
--- a/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out
+++ b/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out
@@ -34,6 +34,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@parquet_map_of_maps
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_map_of_maps_copy
+POSTHOOK: Lineage: parquet_map_of_maps_copy.map_of_maps SIMPLE [(parquet_map_of_maps)parquet_map_of_maps.FieldSchema(name:map_of_maps, type:map<string,map<string,int>>, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_map_of_maps_copy
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_map_of_maps_copy

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_mixed_partition_formats2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_mixed_partition_formats2.q.out b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats2.q.out
index c4d7197..575e83f 100644
--- a/ql/src/test/results/clientpositive/parquet_mixed_partition_formats2.q.out
+++ b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats2.q.out
@@ -87,6 +87,10 @@ POSTHOOK: Input: default@parquet_table_json_partition
 POSTHOOK: Input: default@parquet_table_json_partition@ts=20150101
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@new_table
+POSTHOOK: Lineage: new_table.address SIMPLE [(parquet_table_json_partition)parquet_table_json_partition.FieldSchema(name:address, type:struct<country:bigint,state:bigint>, comment:from deserializer), ]
+POSTHOOK: Lineage: new_table.id SIMPLE [(parquet_table_json_partition)parquet_table_json_partition.FieldSchema(name:id, type:bigint, comment:from deserializer), ]
+POSTHOOK: Lineage: new_table.reports SIMPLE [(parquet_table_json_partition)parquet_table_json_partition.FieldSchema(name:reports, type:array<bigint>, comment:from deserializer), ]
+POSTHOOK: Lineage: new_table.ts SIMPLE [(parquet_table_json_partition)parquet_table_json_partition.FieldSchema(name:ts, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM new_table
 PREHOOK: type: QUERY
 PREHOOK: Input: default@new_table

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
index e1f316f..d7ef637 100644
--- a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
+++ b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out
@@ -78,6 +78,11 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@nestedcomplex
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_nested_complex
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_array SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:max_nested_array, type:array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<int>>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_map SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:max_nested_map, type:array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<map<string,string>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.max_nested_struct SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:max_nested_struct, type:array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<array<struct<s:string,i:bigint>>>>>>>>>>>>>>>>>>>>>>>, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.simple_int SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:simple_int, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_nested_complex.simple_string SIMPLE [(nestedcomplex)nestedcomplex.FieldSchema(name:simple_string, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM parquet_nested_complex SORT BY simple_int
 PREHOOK: type: QUERY
 PREHOOK: Input: default@parquet_nested_complex

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out b/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
index 4b0711e..0b88d84 100644
--- a/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
+++ b/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
@@ -102,6 +102,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@newstructfield
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@NewStructFieldTable
+POSTHOOK: Lineage: newstructfieldtable.a SIMPLE [(newstructfield)newstructfield.FieldSchema(name:a, type:struct<a1:map<string,string>,a2:struct<e1:int,e2:string>,a3:int>, comment:null), ]
+POSTHOOK: Lineage: newstructfieldtable.b SIMPLE [(newstructfield)newstructfield.FieldSchema(name:b, type:int, comment:null), ]
 PREHOOK: query: DESCRIBE NewStructFieldTable
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@newstructfieldtable

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out b/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out
index 3aa1040..de58369 100644
--- a/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out
+++ b/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out
@@ -39,6 +39,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@text_tbl
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parq_tbl
+POSTHOOK: Lineage: parq_tbl.a SIMPLE [(text_tbl)text_tbl.FieldSchema(name:a, type:struct<b:struct<c:int>>, comment:null), ]
 PREHOOK: query: SELECT * FROM text_tbl
 PREHOOK: type: QUERY
 PREHOOK: Input: default@text_tbl

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/partition_decode_name.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_decode_name.q.out b/ql/src/test/results/clientpositive/partition_decode_name.q.out
index e1d06bb..2c700dc 100644
--- a/ql/src/test/results/clientpositive/partition_decode_name.q.out
+++ b/ql/src/test/results/clientpositive/partition_decode_name.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sc
+POSTHOOK: Lineage: sc._c0 EXPRESSION []
+POSTHOOK: Lineage: sc._c1 EXPRESSION []
 PREHOOK: query: create table sc_part (key string) partitioned by (ts string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/partition_special_char.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/partition_special_char.q.out b/ql/src/test/results/clientpositive/partition_special_char.q.out
index 846f6f8..18ca0c3 100644
--- a/ql/src/test/results/clientpositive/partition_special_char.q.out
+++ b/ql/src/test/results/clientpositive/partition_special_char.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sc
+POSTHOOK: Lineage: sc._c0 EXPRESSION []
+POSTHOOK: Lineage: sc._c1 EXPRESSION []
 PREHOOK: query: create table sc_part (key string) partitioned by (ts string) stored as rcfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/query_result_fileformat.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/query_result_fileformat.q.out b/ql/src/test/results/clientpositive/query_result_fileformat.q.out
index 747a5f3..bce3e22 100644
--- a/ql/src/test/results/clientpositive/query_result_fileformat.q.out
+++ b/ql/src/test/results/clientpositive/query_result_fileformat.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_test1
+POSTHOOK: Lineage: nzhang_test1.key SIMPLE []
+POSTHOOK: Lineage: nzhang_test1.value SIMPLE []
 PREHOOK: query: select * from nzhang_test1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_test1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/rcfile_createas1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_createas1.q.out b/ql/src/test/results/clientpositive/rcfile_createas1.q.out
index 97eaa1a..9966fa2 100644
--- a/ql/src/test/results/clientpositive/rcfile_createas1.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_createas1.q.out
@@ -150,6 +150,9 @@ POSTHOOK: Input: default@rcfile_createas1a@ds=1
 POSTHOOK: Input: default@rcfile_createas1a@ds=2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@rcfile_createas1b
+POSTHOOK: Lineage: rcfile_createas1b.key SIMPLE [(rcfile_createas1a)rcfile_createas1a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: rcfile_createas1b.part EXPRESSION [(rcfile_createas1a)rcfile_createas1a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: rcfile_createas1b.value SIMPLE [(rcfile_createas1a)rcfile_createas1a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(key, value) USING 'tr \t _' AS (c)
     FROM rcfile_createas1a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/rcfile_default_format.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/rcfile_default_format.q.out b/ql/src/test/results/clientpositive/rcfile_default_format.q.out
index e85a16d..c961231 100644
--- a/ql/src/test/results/clientpositive/rcfile_default_format.q.out
+++ b/ql/src/test/results/clientpositive/rcfile_default_format.q.out
@@ -45,6 +45,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@rcfile_default_format_ctas
+POSTHOOK: Lineage: rcfile_default_format_ctas.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: rcfile_default_format_ctas.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: DESCRIBE FORMATTED rcfile_default_format_ctas
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@rcfile_default_format_ctas
@@ -141,6 +143,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@rcfile_default_format_ctas
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@textfile_default_format_ctas
+POSTHOOK: Lineage: textfile_default_format_ctas.key SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: textfile_default_format_ctas.value SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: DESCRIBE FORMATTED textfile_default_format_ctas
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@textfile_default_format_ctas
@@ -186,6 +190,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@rcfile_default_format_ctas
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@rcfile_default_format_ctas_default_serde
+POSTHOOK: Lineage: rcfile_default_format_ctas_default_serde.key SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: rcfile_default_format_ctas_default_serde.value SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: DESCRIBE FORMATTED rcfile_default_format_ctas_default_serde
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@rcfile_default_format_ctas_default_serde
@@ -268,6 +274,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@rcfile_default_format_ctas
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@rcfile_ctas_default_serde
+POSTHOOK: Lineage: rcfile_ctas_default_serde.key SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: rcfile_ctas_default_serde.value SIMPLE [(rcfile_default_format_ctas)rcfile_default_format_ctas.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: DESCRIBE FORMATTED rcfile_ctas_default_serde
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@rcfile_ctas_default_serde

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out b/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out
index 71b46e0..094cc8d 100644
--- a/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out
+++ b/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out
@@ -58,6 +58,8 @@ POSTHOOK: Input: default@sih_i_part@p=2
 POSTHOOK: Input: default@sih_i_part@p=3
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sih_src
+POSTHOOK: Lineage: sih_src.key SIMPLE [(sih_i_part)sih_i_part.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: sih_src.value SIMPLE [(sih_i_part)sih_i_part.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: create table sih_src2 as select key, value from sih_src order by key, value
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@sih_src
@@ -68,6 +70,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@sih_src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sih_src2
+POSTHOOK: Lineage: sih_src2.key SIMPLE [(sih_src)sih_src.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: sih_src2.value SIMPLE [(sih_src)sih_src.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- Relaxing hive.exec.mode.local.auto.input.files.max=1.
 -- Hadoop20 will not generate more splits than there are files (one).
 -- Hadoop23 generate splits correctly (four), hence the max needs to be adjusted to ensure running in local mode.

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/select_same_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/select_same_col.q.out b/ql/src/test/results/clientpositive/select_same_col.q.out
index f7362f0..66bf5c2 100644
--- a/ql/src/test/results/clientpositive/select_same_col.q.out
+++ b/ql/src/test/results/clientpositive/select_same_col.q.out
@@ -16,6 +16,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srclimit
+POSTHOOK: Lineage: srclimit.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srclimit.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select cast(value as binary), value from srclimit
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srclimit

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin.q.out b/ql/src/test/results/clientpositive/semijoin.q.out
index 7624fa7..6005f72 100644
--- a/ql/src/test/results/clientpositive/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/semijoin.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from t1 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -41,6 +43,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t2 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t2
@@ -72,6 +76,8 @@ POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t3
+POSTHOOK: Lineage: t3.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), (t2)t2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t3.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), (t2)t2.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t3 sort by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3

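The t3 entries above show lineage fanning out to multiple sources: each Lineage line lists a FieldSchema from both t1 and t2, and the kind is EXPRESSION even for plain column references, plausibly because the value can come from either branch of a union. A minimal sketch under that assumption; the test's actual t3 query is not shown in this diff:

  -- Each t3 column draws on both branches, so its lineage lists
  -- (t1)t1.FieldSchema(...) and (t2)t2.FieldSchema(...) together.
  create table t3 as
    select key, value from (
      select key, value from t1
      union all
      select key, value from t2
    ) u;
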
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/semijoin3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/semijoin3.q.out b/ql/src/test/results/clientpositive/semijoin3.q.out
index d62c32f..aea7e45 100644
--- a/ql/src/test/results/clientpositive/semijoin3.q.out
+++ b/ql/src/test/results/clientpositive/semijoin3.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select cast(key as int) key, value from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -18,6 +20,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
 select count(1)
 from

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
index 9b28649..c874b45 100644
--- a/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_noskew.q.out
@@ -158,6 +158,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@noskew
+POSTHOOK: Lineage: noskew.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: noskew.value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from noskew
 PREHOOK: type: QUERY
 PREHOOK: Input: default@noskew

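One more detail visible in the skewjoin hunks: lineage records the query alias rather than just the table name, hence (src)a.FieldSchema(...) instead of (src)src.FieldSchema(...). A minimal sketch under an assumed self-join; the real skewjoin_noskew query is not shown in this diff:

  -- The alias "a" is what the (src)a.FieldSchema entries refer to.
  create table noskew as
    select a.key, a.value
    from src a join src b on a.key = b.key;
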
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
index 5fec08b..4556ab7 100644
--- a/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
+++ b/ql/src/test/results/clientpositive/skewjoin_onesideskew.q.out
@@ -210,6 +210,8 @@ POSTHOOK: Input: default@nonskewtable
 POSTHOOK: Input: default@skewtable
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@result
+POSTHOOK: Lineage: result.key SIMPLE [(skewtable)a.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: result.value SIMPLE [(skewtable)a.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM result
 PREHOOK: type: QUERY
 PREHOOK: Input: default@result

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
index 1606110..ff7c0aa 100644
--- a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
+++ b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out
@@ -492,6 +492,10 @@ POSTHOOK: Input: default@hive_test_smb_bucket2
 POSTHOOK: Input: default@hive_test_smb_bucket2@ds=2010-10-15
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@smb_mapjoin9_results
+POSTHOOK: Lineage: smb_mapjoin9_results.ds SIMPLE [(hive_test_smb_bucket2)b.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: smb_mapjoin9_results.k1 SIMPLE [(hive_test_smb_bucket2)b.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_mapjoin9_results.k2 SIMPLE [(hive_test_smb_bucket1)a.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: smb_mapjoin9_results.value SIMPLE [(hive_test_smb_bucket2)b.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: drop table smb_mapjoin9_results
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@smb_mapjoin9_results

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
index f115f68..65f0c22 100644
--- a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
index b533711..26bee4e 100644
--- a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src order by key
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 490e957..086ad73 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -112,6 +112,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_CTAS1
+POSTHOOK: Lineage: nzhang_ctas1.k SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_CTAS1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas1
@@ -257,6 +259,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas2
+POSTHOOK: Lineage: nzhang_ctas2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas2
@@ -402,6 +406,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas3
+POSTHOOK: Lineage: nzhang_ctas3.conb EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas3.half_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas3
@@ -612,6 +618,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas4
+POSTHOOK: Lineage: nzhang_ctas4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas4
@@ -881,6 +889,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas5
+POSTHOOK: Lineage: nzhang_ctas5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -909,3 +919,5 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@nzhang_ctas6
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas7
+POSTHOOK: Lineage: nzhang_ctas7.key SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_ctas7.to SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:to, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/join41.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join41.q.out b/ql/src/test/results/clientpositive/spark/join41.q.out
index 07fdf1d..2c2f128 100644
--- a/ql/src/test/results/clientpositive/spark/join41.q.out
+++ b/ql/src/test/results/clientpositive/spark/join41.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s1
+POSTHOOK: Lineage: s1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN
 SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
index 49d1baa..cde7213 100644
--- a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
+++ b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@a
+POSTHOOK: Lineage: a.key SIMPLE []
+POSTHOOK: Lineage: a.value SCRIPT []
 PREHOOK: query: -- overlap on a
 explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
index d000ad7..c3a3511 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_10
+POSTHOOK: Lineage: src_10.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_10.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table src_lv1 (key string, value string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/multi_join_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/multi_join_union.q.out b/ql/src/test/results/clientpositive/spark/multi_join_union.q.out
index 0f80836..52b3c74 100644
--- a/ql/src/test/results/clientpositive/spark/multi_join_union.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_join_union.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src11
+POSTHOOK: Lineage: src11.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src11.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src12 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src12
+POSTHOOK: Lineage: src12.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src13 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src13
+POSTHOOK: Lineage: src13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE src14 as SELECT * FROM src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -42,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src14
+POSTHOOK: Lineage: src14.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN SELECT * FROM 
 src11 a JOIN
 src12 b ON (a.key = b.key) JOIN
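
Taken together, the q.out churn above is mechanical: every CTAS or insert target now reports per-column lineage in one fixed shape, namely target column, dependency type (SIMPLE, EXPRESSION, or SCRIPT), and a bracketed list of source FieldSchemas, which these files leave empty in some cases (see join_filters_overlap above). A throwaway parser for that shape, handy when eyeballing golden-file diffs; the regex and class are mine, not anything shipped with Hive:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class LineageLineParser {
      // Shape: "POSTHOOK: Lineage: <target> <SIMPLE|EXPRESSION|SCRIPT> [<sources>]"
      private static final Pattern LINE = Pattern.compile(
          "POSTHOOK: Lineage: (\\S+) (SIMPLE|EXPRESSION|SCRIPT) \\[(.*)\\]");

      public static void main(String[] args) {
        String line = "POSTHOOK: Lineage: nzhang_ctas3.half_key EXPRESSION "
            + "[(src)src.FieldSchema(name:key, type:string, comment:default), ]";
        Matcher m = LINE.matcher(line);
        if (m.matches()) {
          System.out.println("target  = " + m.group(1)); // nzhang_ctas3.half_key
          System.out.println("kind    = " + m.group(2)); // EXPRESSION
          System.out.println("sources = " + m.group(3)); // (src)src.FieldSchema(...)
        }
      }
    }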


[04/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index bcc7790..13e30db 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -264,6 +264,22 @@ public class ThriftHiveMetastore {
 
     public void cancel_delegation_token(String token_str_form) throws MetaException, org.apache.thrift.TException;
 
+    public boolean add_token(String token_identifier, String delegation_token) throws org.apache.thrift.TException;
+
+    public boolean remove_token(String token_identifier) throws org.apache.thrift.TException;
+
+    public String get_token(String token_identifier) throws org.apache.thrift.TException;
+
+    public List<String> get_all_token_identifiers() throws org.apache.thrift.TException;
+
+    public int add_master_key(String key) throws MetaException, org.apache.thrift.TException;
+
+    public void update_master_key(int seq_number, String key) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public boolean remove_master_key(int key_seq) throws org.apache.thrift.TException;
+
+    public List<String> get_master_keys() throws org.apache.thrift.TException;
+
     public GetOpenTxnsResponse get_open_txns() throws org.apache.thrift.TException;
 
     public GetOpenTxnsInfoResponse get_open_txns_info() throws org.apache.thrift.TException;
@@ -538,6 +554,22 @@ public class ThriftHiveMetastore {
 
     public void cancel_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
+    public void add_token(String token_identifier, String delegation_token, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void remove_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_all_token_identifiers(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void add_master_key(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void update_master_key(int seq_number, String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void remove_master_key(int key_seq, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_master_keys(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_open_txns(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void get_open_txns_info(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -3975,6 +4007,196 @@ public class ThriftHiveMetastore {
       return;
     }
 
+    public boolean add_token(String token_identifier, String delegation_token) throws org.apache.thrift.TException
+    {
+      send_add_token(token_identifier, delegation_token);
+      return recv_add_token();
+    }
+
+    public void send_add_token(String token_identifier, String delegation_token) throws org.apache.thrift.TException
+    {
+      add_token_args args = new add_token_args();
+      args.setToken_identifier(token_identifier);
+      args.setDelegation_token(delegation_token);
+      sendBase("add_token", args);
+    }
+
+    public boolean recv_add_token() throws org.apache.thrift.TException
+    {
+      add_token_result result = new add_token_result();
+      receiveBase(result, "add_token");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_token failed: unknown result");
+    }
+
+    public boolean remove_token(String token_identifier) throws org.apache.thrift.TException
+    {
+      send_remove_token(token_identifier);
+      return recv_remove_token();
+    }
+
+    public void send_remove_token(String token_identifier) throws org.apache.thrift.TException
+    {
+      remove_token_args args = new remove_token_args();
+      args.setToken_identifier(token_identifier);
+      sendBase("remove_token", args);
+    }
+
+    public boolean recv_remove_token() throws org.apache.thrift.TException
+    {
+      remove_token_result result = new remove_token_result();
+      receiveBase(result, "remove_token");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "remove_token failed: unknown result");
+    }
+
+    public String get_token(String token_identifier) throws org.apache.thrift.TException
+    {
+      send_get_token(token_identifier);
+      return recv_get_token();
+    }
+
+    public void send_get_token(String token_identifier) throws org.apache.thrift.TException
+    {
+      get_token_args args = new get_token_args();
+      args.setToken_identifier(token_identifier);
+      sendBase("get_token", args);
+    }
+
+    public String recv_get_token() throws org.apache.thrift.TException
+    {
+      get_token_result result = new get_token_result();
+      receiveBase(result, "get_token");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_token failed: unknown result");
+    }
+
+    public List<String> get_all_token_identifiers() throws org.apache.thrift.TException
+    {
+      send_get_all_token_identifiers();
+      return recv_get_all_token_identifiers();
+    }
+
+    public void send_get_all_token_identifiers() throws org.apache.thrift.TException
+    {
+      get_all_token_identifiers_args args = new get_all_token_identifiers_args();
+      sendBase("get_all_token_identifiers", args);
+    }
+
+    public List<String> recv_get_all_token_identifiers() throws org.apache.thrift.TException
+    {
+      get_all_token_identifiers_result result = new get_all_token_identifiers_result();
+      receiveBase(result, "get_all_token_identifiers");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_token_identifiers failed: unknown result");
+    }
+
+    public int add_master_key(String key) throws MetaException, org.apache.thrift.TException
+    {
+      send_add_master_key(key);
+      return recv_add_master_key();
+    }
+
+    public void send_add_master_key(String key) throws org.apache.thrift.TException
+    {
+      add_master_key_args args = new add_master_key_args();
+      args.setKey(key);
+      sendBase("add_master_key", args);
+    }
+
+    public int recv_add_master_key() throws MetaException, org.apache.thrift.TException
+    {
+      add_master_key_result result = new add_master_key_result();
+      receiveBase(result, "add_master_key");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "add_master_key failed: unknown result");
+    }
+
+    public void update_master_key(int seq_number, String key) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      send_update_master_key(seq_number, key);
+      recv_update_master_key();
+    }
+
+    public void send_update_master_key(int seq_number, String key) throws org.apache.thrift.TException
+    {
+      update_master_key_args args = new update_master_key_args();
+      args.setSeq_number(seq_number);
+      args.setKey(key);
+      sendBase("update_master_key", args);
+    }
+
+    public void recv_update_master_key() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+    {
+      update_master_key_result result = new update_master_key_result();
+      receiveBase(result, "update_master_key");
+      if (result.o1 != null) {
+        throw result.o1;
+      }
+      if (result.o2 != null) {
+        throw result.o2;
+      }
+      return;
+    }
+
+    public boolean remove_master_key(int key_seq) throws org.apache.thrift.TException
+    {
+      send_remove_master_key(key_seq);
+      return recv_remove_master_key();
+    }
+
+    public void send_remove_master_key(int key_seq) throws org.apache.thrift.TException
+    {
+      remove_master_key_args args = new remove_master_key_args();
+      args.setKey_seq(key_seq);
+      sendBase("remove_master_key", args);
+    }
+
+    public boolean recv_remove_master_key() throws org.apache.thrift.TException
+    {
+      remove_master_key_result result = new remove_master_key_result();
+      receiveBase(result, "remove_master_key");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "remove_master_key failed: unknown result");
+    }
+
+    public List<String> get_master_keys() throws org.apache.thrift.TException
+    {
+      send_get_master_keys();
+      return recv_get_master_keys();
+    }
+
+    public void send_get_master_keys() throws org.apache.thrift.TException
+    {
+      get_master_keys_args args = new get_master_keys_args();
+      sendBase("get_master_keys", args);
+    }
+
+    public List<String> recv_get_master_keys() throws org.apache.thrift.TException
+    {
+      get_master_keys_result result = new get_master_keys_result();
+      receiveBase(result, "get_master_keys");
+      if (result.isSetSuccess()) {
+        return result.success;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_master_keys failed: unknown result");
+    }
+
     public GetOpenTxnsResponse get_open_txns() throws org.apache.thrift.TException
     {
       send_get_open_txns();
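
The synchronous half above is the standard generated send/recv pairing, and each recv_* method raises TApplicationException(MISSING_RESULT) when a reply carries neither a success value nor a declared exception. A minimal smoke test of the new surface, assuming an unsecured metastore on localhost:9083 (the endpoint and token strings are placeholders; a Kerberized metastore would also need a SASL transport):

    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class TokenStoreSmokeTest {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083); // placeholder endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // Delegation tokens: add, fetch, enumerate, remove.
        System.out.println("added   = " + client.add_token("tokenId-1", "base64-token-payload"));
        System.out.println("stored  = " + client.get_token("tokenId-1"));
        List<String> ids = client.get_all_token_identifiers();
        System.out.println("ids     = " + ids);
        System.out.println("removed = " + client.remove_token("tokenId-1"));

        // Master keys: keyed by the sequence number add_master_key returns.
        int seq = client.add_master_key("key-material");
        client.update_master_key(seq, "rotated-key-material");
        System.out.println("keys    = " + client.get_master_keys());
        client.remove_master_key(seq);

        transport.close();
      }
    }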
@@ -8606,6 +8828,262 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public void add_token(String token_identifier, String delegation_token, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      add_token_call method_call = new add_token_call(token_identifier, delegation_token, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class add_token_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String token_identifier;
+      private String delegation_token;
+      public add_token_call(String token_identifier, String delegation_token, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.token_identifier = token_identifier;
+        this.delegation_token = delegation_token;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_token", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        add_token_args args = new add_token_args();
+        args.setToken_identifier(token_identifier);
+        args.setDelegation_token(delegation_token);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public boolean getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_add_token();
+      }
+    }
+
+    public void remove_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      remove_token_call method_call = new remove_token_call(token_identifier, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class remove_token_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String token_identifier;
+      public remove_token_call(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.token_identifier = token_identifier;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("remove_token", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        remove_token_args args = new remove_token_args();
+        args.setToken_identifier(token_identifier);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public boolean getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_remove_token();
+      }
+    }
+
+    public void get_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_token_call method_call = new get_token_call(token_identifier, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_token_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String token_identifier;
+      public get_token_call(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.token_identifier = token_identifier;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_token", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_token_args args = new get_token_args();
+        args.setToken_identifier(token_identifier);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public String getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_token();
+      }
+    }
+
+    public void get_all_token_identifiers(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_all_token_identifiers_call method_call = new get_all_token_identifiers_call(resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_all_token_identifiers_call extends org.apache.thrift.async.TAsyncMethodCall {
+      public get_all_token_identifiers_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_all_token_identifiers", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_all_token_identifiers_args args = new get_all_token_identifiers_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<String> getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_all_token_identifiers();
+      }
+    }
+
+    public void add_master_key(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      add_master_key_call method_call = new add_master_key_call(key, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class add_master_key_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String key;
+      public add_master_key_call(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.key = key;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("add_master_key", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        add_master_key_args args = new add_master_key_args();
+        args.setKey(key);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public int getResult() throws MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_add_master_key();
+      }
+    }
+
+    public void update_master_key(int seq_number, String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      update_master_key_call method_call = new update_master_key_call(seq_number, key, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class update_master_key_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private int seq_number;
+      private String key;
+      public update_master_key_call(int seq_number, String key, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.seq_number = seq_number;
+        this.key = key;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_master_key", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        update_master_key_args args = new update_master_key_args();
+        args.setSeq_number(seq_number);
+        args.setKey(key);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws NoSuchObjectException, MetaException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_update_master_key();
+      }
+    }
+
+    public void remove_master_key(int key_seq, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      remove_master_key_call method_call = new remove_master_key_call(key_seq, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class remove_master_key_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private int key_seq;
+      public remove_master_key_call(int key_seq, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.key_seq = key_seq;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("remove_master_key", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        remove_master_key_args args = new remove_master_key_args();
+        args.setKey_seq(key_seq);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public boolean getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_remove_master_key();
+      }
+    }
+
+    public void get_master_keys(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      get_master_keys_call method_call = new get_master_keys_call(resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class get_master_keys_call extends org.apache.thrift.async.TAsyncMethodCall {
+      public get_master_keys_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_master_keys", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        get_master_keys_args args = new get_master_keys_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public List<String> getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_get_master_keys();
+      }
+    }
+
     public void get_open_txns(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
       get_open_txns_call method_call = new get_open_txns_call(resultHandler, this, ___protocolFactory, ___transport);
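
Each *_call above is a one-shot TAsyncMethodCall: write_args frames the request, and getResult replays the buffered reply through a throwaway synchronous Client. A hedged sketch of driving one call end to end, written against the raw AsyncMethodCallback signature this generated code declares (the endpoint is again a placeholder, and the sleep exists only to keep the demo JVM alive for the callback):

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    public class AsyncTokenCall {
      public static void main(String[] args) throws Exception {
        ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
            new TBinaryProtocol.Factory(),
            new TAsyncClientManager(),
            new TNonblockingSocket("localhost", 9083)); // placeholder endpoint

        client.add_token("tokenId-1", "base64-token-payload", new AsyncMethodCallback() {
          public void onComplete(Object response) {
            try {
              // The finished call object decodes its own reply.
              ThriftHiveMetastore.AsyncClient.add_token_call call =
                  (ThriftHiveMetastore.AsyncClient.add_token_call) response;
              System.out.println("added = " + call.getResult());
            } catch (Exception e) {
              e.printStackTrace();
            }
          }
          public void onError(Exception e) {
            e.printStackTrace();
          }
        });

        Thread.sleep(2000); // crude wait so the callback can fire before exit
      }
    }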
@@ -9486,6 +9964,14 @@ public class ThriftHiveMetastore {
       processMap.put("get_delegation_token", new get_delegation_token());
       processMap.put("renew_delegation_token", new renew_delegation_token());
       processMap.put("cancel_delegation_token", new cancel_delegation_token());
+      processMap.put("add_token", new add_token());
+      processMap.put("remove_token", new remove_token());
+      processMap.put("get_token", new get_token());
+      processMap.put("get_all_token_identifiers", new get_all_token_identifiers());
+      processMap.put("add_master_key", new add_master_key());
+      processMap.put("update_master_key", new update_master_key());
+      processMap.put("remove_master_key", new remove_master_key());
+      processMap.put("get_master_keys", new get_master_keys());
       processMap.put("get_open_txns", new get_open_txns());
       processMap.put("get_open_txns_info", new get_open_txns_info());
       processMap.put("open_txns", new open_txns());
@@ -12457,6 +12943,180 @@ public class ThriftHiveMetastore {
       }
     }
 
+    public static class add_token<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_token_args> {
+      public add_token() {
+        super("add_token");
+      }
+
+      public add_token_args getEmptyArgsInstance() {
+        return new add_token_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public add_token_result getResult(I iface, add_token_args args) throws org.apache.thrift.TException {
+        add_token_result result = new add_token_result();
+        result.success = iface.add_token(args.token_identifier, args.delegation_token);
+        result.setSuccessIsSet(true);
+        return result;
+      }
+    }
+
+    public static class remove_token<I extends Iface> extends org.apache.thrift.ProcessFunction<I, remove_token_args> {
+      public remove_token() {
+        super("remove_token");
+      }
+
+      public remove_token_args getEmptyArgsInstance() {
+        return new remove_token_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public remove_token_result getResult(I iface, remove_token_args args) throws org.apache.thrift.TException {
+        remove_token_result result = new remove_token_result();
+        result.success = iface.remove_token(args.token_identifier);
+        result.setSuccessIsSet(true);
+        return result;
+      }
+    }
+
+    public static class get_token<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_token_args> {
+      public get_token() {
+        super("get_token");
+      }
+
+      public get_token_args getEmptyArgsInstance() {
+        return new get_token_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_token_result getResult(I iface, get_token_args args) throws org.apache.thrift.TException {
+        get_token_result result = new get_token_result();
+        result.success = iface.get_token(args.token_identifier);
+        return result;
+      }
+    }
+
+    public static class get_all_token_identifiers<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_all_token_identifiers_args> {
+      public get_all_token_identifiers() {
+        super("get_all_token_identifiers");
+      }
+
+      public get_all_token_identifiers_args getEmptyArgsInstance() {
+        return new get_all_token_identifiers_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_all_token_identifiers_result getResult(I iface, get_all_token_identifiers_args args) throws org.apache.thrift.TException {
+        get_all_token_identifiers_result result = new get_all_token_identifiers_result();
+        result.success = iface.get_all_token_identifiers();
+        return result;
+      }
+    }
+
+    public static class add_master_key<I extends Iface> extends org.apache.thrift.ProcessFunction<I, add_master_key_args> {
+      public add_master_key() {
+        super("add_master_key");
+      }
+
+      public add_master_key_args getEmptyArgsInstance() {
+        return new add_master_key_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public add_master_key_result getResult(I iface, add_master_key_args args) throws org.apache.thrift.TException {
+        add_master_key_result result = new add_master_key_result();
+        try {
+          result.success = iface.add_master_key(args.key);
+          result.setSuccessIsSet(true);
+        } catch (MetaException o1) {
+          result.o1 = o1;
+        }
+        return result;
+      }
+    }
+
+    public static class update_master_key<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_master_key_args> {
+      public update_master_key() {
+        super("update_master_key");
+      }
+
+      public update_master_key_args getEmptyArgsInstance() {
+        return new update_master_key_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public update_master_key_result getResult(I iface, update_master_key_args args) throws org.apache.thrift.TException {
+        update_master_key_result result = new update_master_key_result();
+        try {
+          iface.update_master_key(args.seq_number, args.key);
+        } catch (NoSuchObjectException o1) {
+          result.o1 = o1;
+        } catch (MetaException o2) {
+          result.o2 = o2;
+        }
+        return result;
+      }
+    }
+
+    public static class remove_master_key<I extends Iface> extends org.apache.thrift.ProcessFunction<I, remove_master_key_args> {
+      public remove_master_key() {
+        super("remove_master_key");
+      }
+
+      public remove_master_key_args getEmptyArgsInstance() {
+        return new remove_master_key_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public remove_master_key_result getResult(I iface, remove_master_key_args args) throws org.apache.thrift.TException {
+        remove_master_key_result result = new remove_master_key_result();
+        result.success = iface.remove_master_key(args.key_seq);
+        result.setSuccessIsSet(true);
+        return result;
+      }
+    }
+
+    public static class get_master_keys<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_master_keys_args> {
+      public get_master_keys() {
+        super("get_master_keys");
+      }
+
+      public get_master_keys_args getEmptyArgsInstance() {
+        return new get_master_keys_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_master_keys_result getResult(I iface, get_master_keys_args args) throws org.apache.thrift.TException {
+        get_master_keys_result result = new get_master_keys_result();
+        result.success = iface.get_master_keys();
+        return result;
+      }
+    }
+
     public static class get_open_txns<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_open_txns_args> {
       public get_open_txns() {
         super("get_open_txns");
@@ -13105,6 +13765,14 @@ public class ThriftHiveMetastore {
       processMap.put("get_delegation_token", new get_delegation_token());
       processMap.put("renew_delegation_token", new renew_delegation_token());
       processMap.put("cancel_delegation_token", new cancel_delegation_token());
+      processMap.put("add_token", new add_token());
+      processMap.put("remove_token", new remove_token());
+      processMap.put("get_token", new get_token());
+      processMap.put("get_all_token_identifiers", new get_all_token_identifiers());
+      processMap.put("add_master_key", new add_master_key());
+      processMap.put("update_master_key", new update_master_key());
+      processMap.put("remove_master_key", new remove_master_key());
+      processMap.put("get_master_keys", new get_master_keys());
       processMap.put("get_open_txns", new get_open_txns());
       processMap.put("get_open_txns_info", new get_open_txns_info());
       processMap.put("open_txns", new open_txns());
@@ -20101,21 +20769,22 @@ public class ThriftHiveMetastore {
       }
     }
 
-    public static class get_open_txns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_open_txns_args, GetOpenTxnsResponse> {
-      public get_open_txns() {
-        super("get_open_txns");
+    public static class add_token<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_token_args, Boolean> {
+      public add_token() {
+        super("add_token");
       }
 
-      public get_open_txns_args getEmptyArgsInstance() {
-        return new get_open_txns_args();
+      public add_token_args getEmptyArgsInstance() {
+        return new add_token_args();
       }
 
-      public AsyncMethodCallback<GetOpenTxnsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<GetOpenTxnsResponse>() { 
-          public void onComplete(GetOpenTxnsResponse o) {
-            get_open_txns_result result = new get_open_txns_result();
+        return new AsyncMethodCallback<Boolean>() { 
+          public void onComplete(Boolean o) {
+            add_token_result result = new add_token_result();
             result.success = o;
+            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20127,7 +20796,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_open_txns_result result = new get_open_txns_result();
+            add_token_result result = new add_token_result();
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -20147,26 +20816,27 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_open_txns_args args, org.apache.thrift.async.AsyncMethodCallback<GetOpenTxnsResponse> resultHandler) throws TException {
-        iface.get_open_txns(resultHandler);
+      public void start(I iface, add_token_args args, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws TException {
+        iface.add_token(args.token_identifier, args.delegation_token,resultHandler);
       }
     }
 
-    public static class get_open_txns_info<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_open_txns_info_args, GetOpenTxnsInfoResponse> {
-      public get_open_txns_info() {
-        super("get_open_txns_info");
+    public static class remove_token<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, remove_token_args, Boolean> {
+      public remove_token() {
+        super("remove_token");
       }
 
-      public get_open_txns_info_args getEmptyArgsInstance() {
-        return new get_open_txns_info_args();
+      public remove_token_args getEmptyArgsInstance() {
+        return new remove_token_args();
       }
 
-      public AsyncMethodCallback<GetOpenTxnsInfoResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<GetOpenTxnsInfoResponse>() { 
-          public void onComplete(GetOpenTxnsInfoResponse o) {
-            get_open_txns_info_result result = new get_open_txns_info_result();
+        return new AsyncMethodCallback<Boolean>() { 
+          public void onComplete(Boolean o) {
+            remove_token_result result = new remove_token_result();
             result.success = o;
+            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20178,7 +20848,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            get_open_txns_info_result result = new get_open_txns_info_result();
+            remove_token_result result = new remove_token_result();
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -20198,25 +20868,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, get_open_txns_info_args args, org.apache.thrift.async.AsyncMethodCallback<GetOpenTxnsInfoResponse> resultHandler) throws TException {
-        iface.get_open_txns_info(resultHandler);
+      public void start(I iface, remove_token_args args, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws TException {
+        iface.remove_token(args.token_identifier,resultHandler);
       }
     }
 
-    public static class open_txns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, open_txns_args, OpenTxnsResponse> {
-      public open_txns() {
-        super("open_txns");
+    public static class get_token<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_token_args, String> {
+      public get_token() {
+        super("get_token");
       }
 
-      public open_txns_args getEmptyArgsInstance() {
-        return new open_txns_args();
+      public get_token_args getEmptyArgsInstance() {
+        return new get_token_args();
       }
 
-      public AsyncMethodCallback<OpenTxnsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<String> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<OpenTxnsResponse>() { 
-          public void onComplete(OpenTxnsResponse o) {
-            open_txns_result result = new open_txns_result();
+        return new AsyncMethodCallback<String>() { 
+          public void onComplete(String o) {
+            get_token_result result = new get_token_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20229,63 +20899,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            open_txns_result result = new open_txns_result();
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, open_txns_args args, org.apache.thrift.async.AsyncMethodCallback<OpenTxnsResponse> resultHandler) throws TException {
-        iface.open_txns(args.rqst,resultHandler);
-      }
-    }
-
-    public static class abort_txn<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, abort_txn_args, Void> {
-      public abort_txn() {
-        super("abort_txn");
-      }
-
-      public abort_txn_args getEmptyArgsInstance() {
-        return new abort_txn_args();
-      }
-
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            abort_txn_result result = new abort_txn_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            abort_txn_result result = new abort_txn_result();
-            if (e instanceof NoSuchTxnException) {
-                        result.o1 = (NoSuchTxnException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-             else 
+            get_token_result result = new get_token_result();
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -20305,86 +20919,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, abort_txn_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.abort_txn(args.rqst,resultHandler);
+      public void start(I iface, get_token_args args, org.apache.thrift.async.AsyncMethodCallback<String> resultHandler) throws TException {
+        iface.get_token(args.token_identifier,resultHandler);
       }
     }
 
-    public static class commit_txn<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, commit_txn_args, Void> {
-      public commit_txn() {
-        super("commit_txn");
+    public static class get_all_token_identifiers<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_all_token_identifiers_args, List<String>> {
+      public get_all_token_identifiers() {
+        super("get_all_token_identifiers");
       }
 
-      public commit_txn_args getEmptyArgsInstance() {
-        return new commit_txn_args();
+      public get_all_token_identifiers_args getEmptyArgsInstance() {
+        return new get_all_token_identifiers_args();
       }
 
-      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
-        final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Void>() { 
-          public void onComplete(Void o) {
-            commit_txn_result result = new commit_txn_result();
-            try {
-              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
-              return;
-            } catch (Exception e) {
-              LOGGER.error("Exception writing to internal frame buffer", e);
-            }
-            fb.close();
-          }
-          public void onError(Exception e) {
-            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
-            org.apache.thrift.TBase msg;
-            commit_txn_result result = new commit_txn_result();
-            if (e instanceof NoSuchTxnException) {
-                        result.o1 = (NoSuchTxnException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof TxnAbortedException) {
-                        result.o2 = (TxnAbortedException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
-            {
-              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
-              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
-            }
-            try {
-              fcall.sendResponse(fb,msg,msgType,seqid);
-              return;
-            } catch (Exception ex) {
-              LOGGER.error("Exception writing to internal frame buffer", ex);
-            }
-            fb.close();
-          }
-        };
-      }
-
-      protected boolean isOneway() {
-        return false;
-      }
-
-      public void start(I iface, commit_txn_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
-        iface.commit_txn(args.rqst,resultHandler);
-      }
-    }
-
-    public static class lock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, lock_args, LockResponse> {
-      public lock() {
-        super("lock");
-      }
-
-      public lock_args getEmptyArgsInstance() {
-        return new lock_args();
-      }
-
-      public AsyncMethodCallback<LockResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<LockResponse>() { 
-          public void onComplete(LockResponse o) {
-            lock_result result = new lock_result();
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            get_all_token_identifiers_result result = new get_all_token_identifiers_result();
             result.success = o;
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
@@ -20397,18 +20950,7 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            lock_result result = new lock_result();
-            if (e instanceof NoSuchTxnException) {
-                        result.o1 = (NoSuchTxnException) e;
-                        result.setO1IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof TxnAbortedException) {
-                        result.o2 = (TxnAbortedException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-             else 
+            get_all_token_identifiers_result result = new get_all_token_identifiers_result();
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
               msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
@@ -20428,26 +20970,27 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, lock_args args, org.apache.thrift.async.AsyncMethodCallback<LockResponse> resultHandler) throws TException {
-        iface.lock(args.rqst,resultHandler);
+      public void start(I iface, get_all_token_identifiers_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.get_all_token_identifiers(resultHandler);
       }
     }
 
-    public static class check_lock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, check_lock_args, LockResponse> {
-      public check_lock() {
-        super("check_lock");
+    public static class add_master_key<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, add_master_key_args, Integer> {
+      public add_master_key() {
+        super("add_master_key");
       }
 
-      public check_lock_args getEmptyArgsInstance() {
-        return new check_lock_args();
+      public add_master_key_args getEmptyArgsInstance() {
+        return new add_master_key_args();
       }
 
-      public AsyncMethodCallback<LockResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Integer> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<LockResponse>() { 
-          public void onComplete(LockResponse o) {
-            check_lock_result result = new check_lock_result();
+        return new AsyncMethodCallback<Integer>() { 
+          public void onComplete(Integer o) {
+            add_master_key_result result = new add_master_key_result();
             result.success = o;
+            result.setSuccessIsSet(true);
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20459,22 +21002,12 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            check_lock_result result = new check_lock_result();
-            if (e instanceof NoSuchTxnException) {
-                        result.o1 = (NoSuchTxnException) e;
+            add_master_key_result result = new add_master_key_result();
+            if (e instanceof MetaException) {
+                        result.o1 = (MetaException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof TxnAbortedException) {
-                        result.o2 = (TxnAbortedException) e;
-                        result.setO2IsSet(true);
-                        msg = result;
-            }
-            else             if (e instanceof NoSuchLockException) {
-                        result.o3 = (NoSuchLockException) e;
-                        result.setO3IsSet(true);
-                        msg = result;
-            }
              else 
             {
               msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
@@ -20495,25 +21028,25 @@ public class ThriftHiveMetastore {
         return false;
       }
 
-      public void start(I iface, check_lock_args args, org.apache.thrift.async.AsyncMethodCallback<LockResponse> resultHandler) throws TException {
-        iface.check_lock(args.rqst,resultHandler);
+      public void start(I iface, add_master_key_args args, org.apache.thrift.async.AsyncMethodCallback<Integer> resultHandler) throws TException {
+        iface.add_master_key(args.key,resultHandler);
       }
     }
 
-    public static class unlock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, unlock_args, Void> {
-      public unlock() {
-        super("unlock");
+    public static class update_master_key<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_master_key_args, Void> {
+      public update_master_key() {
+        super("update_master_key");
       }
 
-      public unlock_args getEmptyArgsInstance() {
-        return new unlock_args();
+      public update_master_key_args getEmptyArgsInstance() {
+        return new update_master_key_args();
       }
 
       public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
         return new AsyncMethodCallback<Void>() { 
           public void onComplete(Void o) {
-            unlock_result result = new unlock_result();
+            update_master_key_result result = new update_master_key_result();
             try {
               fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
               return;
@@ -20525,14 +21058,577 @@ public class ThriftHiveMetastore {
           public void onError(Exception e) {
             byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
             org.apache.thrift.TBase msg;
-            unlock_result result = new unlock_result();
-            if (e instanceof NoSuchLockException) {
-                        result.o1 = (NoSuchLockException) e;
+            update_master_key_result result = new update_master_key_result();
+            if (e instanceof NoSuchObjectException) {
+                        result.o1 = (NoSuchObjectException) e;
                         result.setO1IsSet(true);
                         msg = result;
             }
-            else             if (e instanceof TxnOpenException) {
-                        result.o2 = (TxnOpenException) e;
+            else             if (e instanceof MetaException) {
+                        result.o2 = (MetaException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, update_master_key_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.update_master_key(args.seq_number, args.key,resultHandler);
+      }
+    }
+
+    public static class remove_master_key<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, remove_master_key_args, Boolean> {
+      public remove_master_key() {
+        super("remove_master_key");
+      }
+
+      public remove_master_key_args getEmptyArgsInstance() {
+        return new remove_master_key_args();
+      }
+
+      public AsyncMethodCallback<Boolean> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Boolean>() { 
+          public void onComplete(Boolean o) {
+            remove_master_key_result result = new remove_master_key_result();
+            result.success = o;
+            result.setSuccessIsSet(true);
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            remove_master_key_result result = new remove_master_key_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, remove_master_key_args args, org.apache.thrift.async.AsyncMethodCallback<Boolean> resultHandler) throws TException {
+        iface.remove_master_key(args.key_seq,resultHandler);
+      }
+    }
+
+    public static class get_master_keys<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_master_keys_args, List<String>> {
+      public get_master_keys() {
+        super("get_master_keys");
+      }
+
+      public get_master_keys_args getEmptyArgsInstance() {
+        return new get_master_keys_args();
+      }
+
+      public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<List<String>>() { 
+          public void onComplete(List<String> o) {
+            get_master_keys_result result = new get_master_keys_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_master_keys_result result = new get_master_keys_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_master_keys_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException {
+        iface.get_master_keys(resultHandler);
+      }
+    }
+
+    public static class get_open_txns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_open_txns_args, GetOpenTxnsResponse> {
+      public get_open_txns() {
+        super("get_open_txns");
+      }
+
+      public get_open_txns_args getEmptyArgsInstance() {
+        return new get_open_txns_args();
+      }
+
+      public AsyncMethodCallback<GetOpenTxnsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<GetOpenTxnsResponse>() { 
+          public void onComplete(GetOpenTxnsResponse o) {
+            get_open_txns_result result = new get_open_txns_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_open_txns_result result = new get_open_txns_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_open_txns_args args, org.apache.thrift.async.AsyncMethodCallback<GetOpenTxnsResponse> resultHandler) throws TException {
+        iface.get_open_txns(resultHandler);
+      }
+    }
+
+    public static class get_open_txns_info<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_open_txns_info_args, GetOpenTxnsInfoResponse> {
+      public get_open_txns_info() {
+        super("get_open_txns_info");
+      }
+
+      public get_open_txns_info_args getEmptyArgsInstance() {
+        return new get_open_txns_info_args();
+      }
+
+      public AsyncMethodCallback<GetOpenTxnsInfoResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<GetOpenTxnsInfoResponse>() { 
+          public void onComplete(GetOpenTxnsInfoResponse o) {
+            get_open_txns_info_result result = new get_open_txns_info_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            get_open_txns_info_result result = new get_open_txns_info_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, get_open_txns_info_args args, org.apache.thrift.async.AsyncMethodCallback<GetOpenTxnsInfoResponse> resultHandler) throws TException {
+        iface.get_open_txns_info(resultHandler);
+      }
+    }
+
+    public static class open_txns<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, open_txns_args, OpenTxnsResponse> {
+      public open_txns() {
+        super("open_txns");
+      }
+
+      public open_txns_args getEmptyArgsInstance() {
+        return new open_txns_args();
+      }
+
+      public AsyncMethodCallback<OpenTxnsResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<OpenTxnsResponse>() { 
+          public void onComplete(OpenTxnsResponse o) {
+            open_txns_result result = new open_txns_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            open_txns_result result = new open_txns_result();
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, open_txns_args args, org.apache.thrift.async.AsyncMethodCallback<OpenTxnsResponse> resultHandler) throws TException {
+        iface.open_txns(args.rqst,resultHandler);
+      }
+    }
+
+    public static class abort_txn<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, abort_txn_args, Void> {
+      public abort_txn() {
+        super("abort_txn");
+      }
+
+      public abort_txn_args getEmptyArgsInstance() {
+        return new abort_txn_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            abort_txn_result result = new abort_txn_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            abort_txn_result result = new abort_txn_result();
+            if (e instanceof NoSuchTxnException) {
+                        result.o1 = (NoSuchTxnException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, abort_txn_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.abort_txn(args.rqst,resultHandler);
+      }
+    }
+
+    public static class commit_txn<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, commit_txn_args, Void> {
+      public commit_txn() {
+        super("commit_txn");
+      }
+
+      public commit_txn_args getEmptyArgsInstance() {
+        return new commit_txn_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            commit_txn_result result = new commit_txn_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            commit_txn_result result = new commit_txn_result();
+            if (e instanceof NoSuchTxnException) {
+                        result.o1 = (NoSuchTxnException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof TxnAbortedException) {
+                        result.o2 = (TxnAbortedException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, commit_txn_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException {
+        iface.commit_txn(args.rqst,resultHandler);
+      }
+    }
+
+    public static class lock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, lock_args, LockResponse> {
+      public lock() {
+        super("lock");
+      }
+
+      public lock_args getEmptyArgsInstance() {
+        return new lock_args();
+      }
+
+      public AsyncMethodCallback<LockResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<LockResponse>() { 
+          public void onComplete(LockResponse o) {
+            lock_result result = new lock_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            lock_result result = new lock_result();
+            if (e instanceof NoSuchTxnException) {
+                        result.o1 = (NoSuchTxnException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof TxnAbortedException) {
+                        result.o2 = (TxnAbortedException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, lock_args args, org.apache.thrift.async.AsyncMethodCallback<LockResponse> resultHandler) throws TException {
+        iface.lock(args.rqst,resultHandler);
+      }
+    }
+
+    public static class check_lock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, check_lock_args, LockResponse> {
+      public check_lock() {
+        super("check_lock");
+      }
+
+      public check_lock_args getEmptyArgsInstance() {
+        return new check_lock_args();
+      }
+
+      public AsyncMethodCallback<LockResponse> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<LockResponse>() { 
+          public void onComplete(LockResponse o) {
+            check_lock_result result = new check_lock_result();
+            result.success = o;
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            check_lock_result result = new check_lock_result();
+            if (e instanceof NoSuchTxnException) {
+                        result.o1 = (NoSuchTxnException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof TxnAbortedException) {
+                        result.o2 = (TxnAbortedException) e;
+                        result.setO2IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof NoSuchLockException) {
+                        result.o3 = (NoSuchLockException) e;
+                        result.setO3IsSet(true);
+                        msg = result;
+            }
+             else 
+            {
+              msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION;
+              msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage());
+            }
+            try {
+              fcall.sendResponse(fb,msg,msgType,seqid);
+              return;
+            } catch (Exception ex) {
+              LOGGER.error("Exception writing to internal frame buffer", ex);
+            }
+            fb.close();
+          }
+        };
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public void start(I iface, check_lock_args args, org.apache.thrift.async.AsyncMethodCallback<LockResponse> resultHandler) throws TException {
+        iface.check_lock(args.rqst,resultHandler);
+      }
+    }
+
+    public static class unlock<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, unlock_args, Void> {
+      public unlock() {
+        super("unlock");
+      }
+
+      public unlock_args getEmptyArgsInstance() {
+        return new unlock_args();
+      }
+
+      public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+        final org.apache.thrift.AsyncProcessFunction fcall = this;
+        return new AsyncMethodCallback<Void>() { 
+          public void onComplete(Void o) {
+            unlock_result result = new unlock_result();
+            try {
+              fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid);
+              return;
+            } catch (Exception e) {
+              LOGGER.error("Exception writing to internal frame buffer", e);
+            }
+            fb.close();
+          }
+          public void onError(Exception e) {
+            byte msgType = org.apache.thrift.protocol.TMessageType.REPLY;
+            org.apache.thrift.TBase msg;
+            unlock_result result = new unlock_result();
+            if (e instanceof NoSuchLockException) {
+                        result.o1 = (NoSuchLockException) e;
+                        result.setO1IsSet(true);
+                        msg = result;
+            }
+            else             if (e instanceof TxnOpenException) {
+                        result.o2 = (TxnOpenException) e;
                         result.setO2IsSet(true);
                         msg = result;
             }
@@ -141432,85 +142528,6312 @@ public class ThriftHiveMetastore {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_ugi_result.class, metaDataMap);
+    }
+
+    public set_ugi_result() {
+    }
+
+    public set_ugi_result(
+      List<String> success,
+      MetaException o1)
+    {
+      this();
+      this.success = success;
+      this.o1 = o1;
+    }
+
+    /**
+     * Performs a deep copy on <i>other</i>.
+     */
+    public set_ugi_result(set_ugi_result other) {
+      if (other.isSetSuccess()) {
+        List<String> __this__success = new ArrayList<String>(other.success);
+        this.success = __this__success;
+      }
+      if (other.isSetO1()) {
+        this.o1 = new MetaException(other.o1);
+      }
+    }
+
+    public set_ugi_result deepCopy() {
+      return new set_ugi_result(this);
+    }
+
+    @Override
+    public void clear() {
+      this.success = null;
+      this.o1 = null;
+    }
+
+    public int getSuccessSize() {
+      return (this.success == null) ? 0 : this.success.size();
+    }
+
+    public java.util.Iterator<String> getSuccessIterator() {
+      return (this.success == null) ? null : this.success.iterator();
+    }
+
+    public void addToSuccess(String elem) {
+      if (this.success == null) {
+        this.success = new ArrayList<String>();
+      }
+      this.success.add(elem);
+    }
+
+    public List<String> getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(List<String> success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
+    }
+
+    public MetaException getO1() {
+      return this.o1;
+    }
+
+    public void setO1(MetaException o1) {
+      this.o1 = o1;
+    }
+
+    public void unsetO1() {
+      this.o1 = null;
+    }
+
+    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
+    public boolean isSetO1() {
+      return this.o1 != null;
+    }
+
+    public void setO1IsSet(boolean value) {
+      if (!value) {
+        this.o1 = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((List<String>)value);
+        }
+        break;
+
+      case O1:
+        if (value == null) {
+          unsetO1();
+        } else {
+          setO1((MetaException)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
+      case O1:
+        return getO1();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
+      case O1:
+        return isSetO1();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof set_ugi_result)
+        return this.equals((set_ugi_result)that);
+      return false;
+    }
+
+    public boolean equals(set_ugi_result that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_success = true && this.isSetSuccess();
+      boolean that_present_success = true && that.isSetSuccess();
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (!this.success.equals(that.success))
+          return false;
+      }
+
+      boolean this_present_o1 = true && this.isSetO1();
+      boolean that_present_o1 = true && that.isSetO1();
+      if (this_present_o1 || that_present_o1) {
+        if (!(this_present_o1 && that_present_o1))
+          return false;
+        if (!this.o1.equals(that.o1))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_success = true && (isSetSuccess());
+      list.add(present_success);
+      if (present_success)
+        list.add(success);
+
+      boolean present_o1 = true && (isSetO1());
+      list.add(present_o1);
+      if (present_o1)
+        list.add(o1);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(set_ugi_result other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetO1()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("set_ugi_result(");
+      boolean first = true;
+
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
+      if (!first) sb.append(", ");
+      sb.append("o1:");
+      if (this.o1 == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.o1);
+      }
+      first = false;
+      sb.append(")");
+      return sb.toString();
+    }
+
+    public void validate() throws org.apache.thrift.TException {
+      // check for required fields
+      // check for sub-struct validity
+    }
+
+    private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+      try {
+        write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+      try {
+        read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+      } catch (org.apache.thrift.TException te) {
+        throw new java.io.IOException(te);
+      }
+    }
+
+    private static class set_ugi_resultStandardSchemeFactory implements SchemeFactory {
+      public set_ugi_resultStandardScheme getScheme() {
+        return new set_ugi_resultStandardScheme();
+      }
+    }
+
+    private static class set_ugi_resultStandardScheme extends StandardScheme<set_ugi_result> {
+
+      public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result struct) throws org.apache.thrift.TException {
+        org.apache.thrift.protocol.TField schemeField;
+        iprot.readStructBegin();
+        while (true)
+        {
+          schemeField = iprot.readFieldBeg

<TRUNCATED>

[07/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/87131d0c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/87131d0c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/87131d0c

Branch: refs/heads/llap
Commit: 87131d0c7cce973d8792ed354f70ec73b0f52dcd
Parents: 41e8201
Author: ctang <ct...@cloudera.com>
Authored: Tue Mar 8 15:05:54 2016 -0500
Committer: ctang <ct...@cloudera.com>
Committed: Tue Mar 8 15:05:54 2016 -0500

----------------------------------------------------------------------
 .../org/apache/hive/minikdc/MiniHiveKdc.java    |    15 +
 .../hive/minikdc/TestJdbcWithDBTokenStore.java  |    40 +
 .../hive/minikdc/TestJdbcWithMiniKdc.java       |    12 +-
 .../hadoop/hive/thrift/TestDBTokenStore.java    |     3 +-
 metastore/if/hive_metastore.thrift              |    24 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 22340 ++++++++++-------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  1409 +-
 .../ThriftHiveMetastore_server.skeleton.cpp     |    40 +
 .../hive/metastore/api/ThriftHiveMetastore.java |  9220 ++++++-
 .../gen-php/metastore/ThriftHiveMetastore.php   |  1756 ++
 .../hive_metastore/ThriftHiveMetastore-remote   |    56 +
 .../hive_metastore/ThriftHiveMetastore.py       |  1639 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   450 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   159 +
 .../hive/metastore/HiveMetaStoreClient.java     |    42 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |    19 +-
 .../gen-py/TCLIService/TCLIService-remote       |     0
 .../hive/service/auth/HiveAuthFactory.java      |    33 +-
 .../apache/hadoop/hive/thrift/DBTokenStore.java |    49 +-
 .../hive/thrift/HiveDelegationTokenManager.java |     1 +
 20 files changed, 26383 insertions(+), 10924 deletions(-)
----------------------------------------------------------------------
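For context on how the new store is selected: DBTokenStore plugs into the existing
delegation-token store property, exactly as TestJdbcWithDBTokenStore does further
down. A minimal sketch of that configuration in Java — the helper class name
DBTokenStoreConfSketch is illustrative only; the property constant and store class
name are the ones this patch exercises:

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

  public class DBTokenStoreConfSketch {
    public static HiveConf dbTokenStoreConf() {
      HiveConf hiveConf = new HiveConf();
      // Persist delegation tokens and master keys in the metastore database
      // instead of the default in-memory store.
      hiveConf.setVar(ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS,
          "org.apache.hadoop.hive.thrift.DBTokenStore");
      return hiveConf;
    }
  }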


http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
index dedbf35..4e3a9c5 100644
--- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/MiniHiveKdc.java
@@ -179,5 +179,20 @@ public class MiniHiveKdc {
         withMiniKdc(hivePrincipal, hiveKeytab).build();
   }
 
+  /**
+   * Create a MiniHS2 with the hive service principal and keytab in MiniHiveKdc
+   * @param miniHiveKdc
+   * @param hiveConf
+   * @return new MiniHS2 instance
+   * @throws Exception
+   */
+  public static MiniHS2 getMiniHS2WithKerbWithRemoteHMS(MiniHiveKdc miniHiveKdc, HiveConf hiveConf) throws Exception {
+    String hivePrincipal =
+        miniHiveKdc.getFullyQualifiedServicePrincipal(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL);
+    String hiveKeytab = miniHiveKdc.getKeyTabFile(
+        miniHiveKdc.getServicePrincipalForUser(MiniHiveKdc.HIVE_SERVICE_PRINCIPAL));
 
+    return new MiniHS2.Builder().withConf(hiveConf).withRemoteMetastore().
+        withMiniKdc(hivePrincipal, hiveKeytab).build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
new file mode 100644
index 0000000..d690aaa
--- /dev/null
+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.minikdc;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.junit.BeforeClass;
+
+public class TestJdbcWithDBTokenStore extends TestJdbcWithMiniKdc{
+
+  @BeforeClass
+  public static void beforeTest() throws Exception {
+    Class.forName(MiniHS2.getJdbcDriverName());
+    confOverlay.put(ConfVars.HIVE_SERVER2_SESSION_HOOK.varname,
+        SessionHookTest.class.getName());
+
+    HiveConf hiveConf = new HiveConf();
+    hiveConf.setVar(ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS, "org.apache.hadoop.hive.thrift.DBTokenStore");
+    miniHiveKdc = MiniHiveKdc.getMiniHiveKdc(hiveConf);
+    miniHS2 = MiniHiveKdc.getMiniHS2WithKerbWithRemoteHMS(miniHiveKdc, hiveConf);
+    miniHS2.start(confOverlay);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java
index 3ef2ce3..71a08fb 100644
--- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java
+++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdc.java
@@ -57,10 +57,10 @@ public class TestJdbcWithMiniKdc {
     }
   }
 
-  private static MiniHS2 miniHS2 = null;
-  private static MiniHiveKdc miniHiveKdc = null;
-  private static Map<String, String> confOverlay = new HashMap<String, String>();
-  private Connection hs2Conn;
+  protected static MiniHS2 miniHS2 = null;
+  protected static MiniHiveKdc miniHiveKdc = null;
+  protected static Map<String, String> confOverlay = new HashMap<String, String>();
+  protected Connection hs2Conn;
 
   @BeforeClass
   public static void beforeTest() throws Exception {
@@ -241,7 +241,7 @@ public class TestJdbcWithMiniKdc {
    * @param expectedValue
    * @throws Exception
    */
-  private void verifyProperty(String propertyName, String expectedValue) throws Exception {
+  protected void verifyProperty(String propertyName, String expectedValue) throws Exception {
     Statement stmt = hs2Conn .createStatement();
     ResultSet res = stmt.executeQuery("set " + propertyName);
     assertTrue(res.next());
@@ -251,7 +251,7 @@ public class TestJdbcWithMiniKdc {
   }
 
   // Store the given token in the UGI
-  private void storeToken(String tokenStr, UserGroupInformation ugi)
+  protected void storeToken(String tokenStr, UserGroupInformation ugi)
       throws Exception {
     Utils.setTokenStr(ugi,
         tokenStr, HiveAuthFactory.HS2_CLIENT_TOKEN);

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestDBTokenStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestDBTokenStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestDBTokenStore.java
index f5934ee..4bfa224 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestDBTokenStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestDBTokenStore.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.thrift.DelegationTokenStore.TokenStoreException;
+import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
 import org.apache.hadoop.security.token.delegation.HiveDelegationTokenSupport;
@@ -37,7 +38,7 @@ public class TestDBTokenStore extends TestCase{
   public void testDBTokenStore() throws TokenStoreException, MetaException, IOException {
 
     DelegationTokenStore ts = new DBTokenStore();
-    ts.init(new HMSHandler("Test handler"), null);
+    ts.init(new HMSHandler("Test handler"), ServerMode.METASTORE);
     assertEquals(0, ts.getMasterKeys().length);
     assertEquals(false,ts.removeMasterKey(-1));
     try{

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/if/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 3635054..6a55962 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -1294,6 +1294,30 @@ service ThriftHiveMetastore extends fb303.FacebookService
   // method to cancel delegation token obtained from metastore server
   void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
 
+  // add a delegation token
+  bool add_token(1:string token_identifier, 2:string delegation_token)
+
+  // remove a delegation token
+  bool remove_token(1:string token_identifier)
+
+  // get a delegation token by identifier
+  string get_token(1:string token_identifier)
+
+  // get all delegation token identifiers
+  list<string> get_all_token_identifiers()
+
+  // add master key
+  i32 add_master_key(1:string key) throws (1:MetaException o1)
+
+  // update master key
+  void update_master_key(1:i32 seq_number, 2:string key) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+
+  // remove master key
+  bool remove_master_key(1:i32 key_seq)
+
+  // get master keys
+  list<string> get_master_keys()
+
   // Transaction and lock management calls
   // Get just list of open transactions
   GetOpenTxnsResponse get_open_txns()
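 
 Taken together, the eight methods above expose the token store over the
 metastore's Thrift interface. A minimal smoke-test sketch against the generated
 Java client, assuming an unsecured metastore at localhost:9083 — the endpoint,
 token identifiers, and key material are illustrative, not part of the patch:
 
   import java.util.List;
 
   import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
   import org.apache.thrift.protocol.TBinaryProtocol;
   import org.apache.thrift.transport.TSocket;
   import org.apache.thrift.transport.TTransport;
 
   public class TokenRpcSmokeTest {
     public static void main(String[] args) throws Exception {
       TTransport transport = new TSocket("localhost", 9083); // assumed endpoint
       transport.open();
       ThriftHiveMetastore.Client client =
           new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
 
       // Delegation-token round trip: add, fetch, enumerate, remove.
       client.add_token("tokenId-1", "opaque-token-string");
       String stored = client.get_token("tokenId-1");
       List<String> ids = client.get_all_token_identifiers();
       client.remove_token("tokenId-1");
 
       // Master-key lifecycle: add_master_key returns the key's sequence
       // number, which update/remove then take as their handle.
       int seq = client.add_master_key("key-material-v1");
       client.update_master_key(seq, "key-material-v2");
       List<String> keys = client.get_master_keys();
       client.remove_master_key(seq);
 
       System.out.println(stored + " / " + ids.size() + " tokens, "
           + keys.size() + " keys");
       transport.close();
     }
   }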


[17/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/temp_table_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/temp_table_join1.q.out b/ql/src/test/results/clientpositive/temp_table_join1.q.out
index bf41f8a..aafc263 100644
--- a/ql/src/test/results/clientpositive/temp_table_join1.q.out
+++ b/ql/src/test/results/clientpositive/temp_table_join1.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_nontemp
+POSTHOOK: Lineage: src_nontemp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_nontemp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
index 3bc4e76..470590a 100644
--- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Shuffle Join MERGEJOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
index 09f2fc5..68df37d 100644
--- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out
@@ -14,6 +14,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@A
+POSTHOOK: Lineage: a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table B as
 select * from src order by key
 limit 10
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@B
+POSTHOOK: Lineage: b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 Warning: Map Join MAPJOIN[10][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: explain select * from A join B
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/ctas.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out
index 0259c02..52ea981 100644
--- a/ql/src/test/results/clientpositive/tez/ctas.q.out
+++ b/ql/src/test/results/clientpositive/tez/ctas.q.out
@@ -117,6 +117,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_CTAS1
+POSTHOOK: Lineage: nzhang_ctas1.k SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_CTAS1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas1
@@ -267,6 +269,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas2
+POSTHOOK: Lineage: nzhang_ctas2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas2
@@ -417,6 +421,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas3
+POSTHOOK: Lineage: nzhang_ctas3.conb EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas3.half_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas3
@@ -632,6 +638,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas4
+POSTHOOK: Lineage: nzhang_ctas4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from nzhang_ctas4
 PREHOOK: type: QUERY
 PREHOOK: Input: default@nzhang_ctas4
@@ -906,6 +914,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas5
+POSTHOOK: Lineage: nzhang_ctas5.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas5.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table nzhang_ctas6 (key string, `to` string)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -934,3 +944,5 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@nzhang_ctas6
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas7
+POSTHOOK: Lineage: nzhang_ctas7.key SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: nzhang_ctas7.to SIMPLE [(nzhang_ctas6)nzhang_ctas6.FieldSchema(name:to, type:string, comment:null), ]

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/cte_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_2.q.out b/ql/src/test/results/clientpositive/tez/cte_2.q.out
index 23f8ec6..017bded 100644
--- a/ql/src/test/results/clientpositive/tez/cte_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/cte_2.q.out
@@ -94,6 +94,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s2
+POSTHOOK: Lineage: s2.key SIMPLE []
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/cte_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_4.q.out b/ql/src/test/results/clientpositive/tez/cte_4.q.out
index d560d74..6385abe 100644
--- a/ql/src/test/results/clientpositive/tez/cte_4.q.out
+++ b/ql/src/test/results/clientpositive/tez/cte_4.q.out
@@ -124,6 +124,7 @@ POSTHOOK: Output: database:default
 POSTHOOK: Output: default@q1
 POSTHOOK: Output: default@s2
 #### A masked pattern was here ####
+POSTHOOK: Lineage: s2.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: select * from s2
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
index b4b14c2..159415d 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out
@@ -131,6 +131,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date
+POSTHOOK: Lineage: srcpart_date.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -149,6 +151,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_hour
+POSTHOOK: Lineage: srcpart_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -167,6 +171,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date_hour
+POSTHOOK: Lineage: srcpart_date_hour.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -185,6 +193,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_double_hour
+POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: -- single column, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/empty_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/empty_join.q.out b/ql/src/test/results/clientpositive/tez/empty_join.q.out
index 4c89317..8a0c040 100644
--- a/ql/src/test/results/clientpositive/tez/empty_join.q.out
+++ b/ql/src/test/results/clientpositive/tez/empty_join.q.out
@@ -12,6 +12,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test_1
+POSTHOOK: Lineage: test_1.id SIMPLE []
 PREHOOK: query: DROP TABLE IF EXISTS test_2
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS test_2
@@ -38,6 +39,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: _dummy_database@_dummy_table
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@test_3
+POSTHOOK: Lineage: test_3.id SIMPLE []
 PREHOOK: query: explain
 SELECT t1.id, t2.id, t3.id
 FROM test_1 t1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index 70a1322..a3ff85c 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -3378,6 +3378,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_CTAS1
+POSTHOOK: Lineage: nzhang_ctas1.k SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: query: explain create table nzhang_ctas3 row format serde "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" stored as RCFile as select key/2 half_key, concat(value, "_con") conb  from src sort by half_key, conb limit 10
@@ -3429,6 +3431,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@nzhang_ctas3
+POSTHOOK: Lineage: nzhang_ctas3.conb EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: nzhang_ctas3.half_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2
 PREHOOK: type: CREATETABLE
 POSTHOOK: query: explain create table if not exists nzhang_ctas3 as select key, value from src sort by key, value limit 2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
index 3f948a5..1222b94 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_3.q.out
@@ -288,6 +288,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain grant select on table src_autho_test to user hive_test_user
 PREHOOK: type: GRANT_PRIVILEGE
 POSTHOOK: query: explain grant select on table src_autho_test to user hive_test_user

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
index 3a1b937..a8eb0d3 100644
--- a/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/hybridgrace_hashjoin_1.q.out
@@ -1244,6 +1244,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_mapjoin
+POSTHOOK: Lineage: decimal_mapjoin.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_mapjoin.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[14][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: EXPLAIN SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2
   FROM decimal_mapjoin l

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/llap_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/llap_nullscan.q.out b/ql/src/test/results/clientpositive/tez/llap_nullscan.q.out
index 099bfe6..39f04ea 100644
--- a/ql/src/test/results/clientpositive/tez/llap_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/tez/llap_nullscan.q.out
@@ -20,6 +20,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc
+POSTHOOK: Lineage: src_orc.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: src_orc.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: src_orc.key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc.value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain extended
 select * from src_orc where 1=2
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/llapdecider.q.out b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
index a2d7f2a..2b0e639 100644
--- a/ql/src/test/results/clientpositive/tez/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
@@ -81,6 +81,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_orc
+POSTHOOK: Lineage: src_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_orc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT key, count(value) as cnt FROM src_orc GROUP BY key ORDER BY cnt

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/temp_table.q.out b/ql/src/test/results/clientpositive/tez/temp_table.q.out
index d31dfb1..1c10a4e 100644
--- a/ql/src/test/results/clientpositive/tez/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/tez/temp_table.q.out
@@ -475,6 +475,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select count(*) from s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/tez_dml.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_dml.q.out b/ql/src/test/results/clientpositive/tez/tez_dml.q.out
index f80eaa7..43996cd 100644
--- a/ql/src/test/results/clientpositive/tez/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_dml.q.out
@@ -104,6 +104,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tmp_src
+POSTHOOK: Lineage: tmp_src.cnt EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: tmp_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: SELECT * FROM tmp_src
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tmp_src
@@ -1478,6 +1480,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tmp_src_part
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@empty
+POSTHOOK: Lineage: empty.c SIMPLE [(tmp_src_part)tmp_src_part.FieldSchema(name:c, type:string, comment:null), ]
+POSTHOOK: Lineage: empty.d SIMPLE []
 PREHOOK: query: SELECT * FROM empty
 PREHOOK: type: QUERY
 PREHOOK: Input: default@empty

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out b/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
index 58c4c86..180bcc6 100644
--- a/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_join_result_complex.q.out
@@ -667,6 +667,22 @@ POSTHOOK: Input: default@ct_events_clean
 POSTHOOK: Input: default@service_request_clean
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ct_events1_test
+POSTHOOK: Lineage: ct_events1_test.ce_create_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_create_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_end_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_end_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_notes SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cmpltyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cmpltyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctevs_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cnctevs_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctmd_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cnctmd_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cntvnst_stts_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cntvnst_stts_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_event_id SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_event_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_mode SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_mode, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_type SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_type, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.notes SIMPLE [(service_request_clean)b.FieldSchema(name:notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.src SIMPLE [(service_request_clean)b.FieldSchema(name:sum_reason_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqct_cds SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqct_cds, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqst_id SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqst_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrtyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:svcrtyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.total_transfers SIMPLE [(ct_events_clean)a.FieldSchema(name:total_transfers, type:int, comment:null), ]
 PREHOOK: query: select * from ct_events1_test
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ct_events1_test
@@ -1889,6 +1905,22 @@ POSTHOOK: Input: default@ct_events_clean
 POSTHOOK: Input: default@service_request_clean
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ct_events1_test
+POSTHOOK: Lineage: ct_events1_test.ce_create_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_create_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_end_dt SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_end_dt, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.ce_notes SIMPLE [(ct_events_clean)a.FieldSchema(name:ce_notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cmpltyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cmpltyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctevs_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cnctevs_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cnctmd_cd SIMPLE [(service_request_clean)b.FieldSchema(name:cnctmd_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.cntvnst_stts_cd SIMPLE [(ct_events_clean)a.FieldSchema(name:cntvnst_stts_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_event_id SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_event_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_mode SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_mode, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.contact_type SIMPLE [(ct_events_clean)a.FieldSchema(name:contact_type, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.notes SIMPLE [(service_request_clean)b.FieldSchema(name:notes, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.src SIMPLE [(service_request_clean)b.FieldSchema(name:sum_reason_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqct_cds SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqct_cds, type:array<string>, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrqst_id SIMPLE [(service_request_clean)b.FieldSchema(name:svcrqst_id, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.svcrtyp_cd SIMPLE [(service_request_clean)b.FieldSchema(name:svcrtyp_cd, type:string, comment:null), ]
+POSTHOOK: Lineage: ct_events1_test.total_transfers SIMPLE [(ct_events_clean)a.FieldSchema(name:total_transfers, type:int, comment:null), ]
 PREHOOK: query: select * from ct_events1_test
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ct_events1_test

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/tez_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out
index 2c74605..bf03c1f 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out
@@ -109,6 +109,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.key EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.value EXPRESSION [(src)s1.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by key, value limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -299,6 +301,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.cnt EXPRESSION [(src)src.null, ]
 PREHOOK: query: select * from ut order by cnt limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -444,6 +447,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by skey, ukey limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -800,6 +805,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.lkey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)s1.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by skey, ukey, lkey limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -1054,6 +1062,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.key EXPRESSION [(src)s2.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by key limit 30
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut
@@ -1311,6 +1320,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ut
+POSTHOOK: Lineage: ut.skey SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: ut.ukey EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: select * from ut order by ukey, skey limit 20
 PREHOOK: type: QUERY
 PREHOOK: Input: default@ut

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out b/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
index 047c104..bef2365 100644
--- a/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/unionDistinct_1.q.out
@@ -7778,6 +7778,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src2
+POSTHOOK: Lineage: src2.count EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table src3 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -7788,6 +7790,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src3
+POSTHOOK: Lineage: src3.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src3.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src4 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -7798,6 +7802,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src4
+POSTHOOK: Lineage: src4.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src4.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src5 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -7808,6 +7814,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src5.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: explain extended
 select s.key, s.count from (
   select key, count from src2  where key < 10
@@ -11134,6 +11142,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_same
+POSTHOOK: Lineage: jackson_sev_same.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_same.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table dim_pho as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -11144,6 +11154,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dim_pho
+POSTHOOK: Lineage: dim_pho.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dim_pho.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table jackson_sev_add as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -11154,6 +11166,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_add
+POSTHOOK: Lineage: jackson_sev_add.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_add.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho UNION DISTINCT select * from jackson_sev_add)b on a.key=b.key and b.key=97
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho UNION DISTINCT select * from jackson_sev_add)b on a.key=b.key and b.key=97
@@ -12459,6 +12473,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select * from src where key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -12469,6 +12485,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t3(key string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -13031,6 +13049,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -13041,6 +13061,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table t7(c1 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -13348,6 +13370,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -13358,6 +13382,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- Test simple union with double
 EXPLAIN
 SELECT * FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out b/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
index f4cdd86..bd4ca72 100644
--- a/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u1
+POSTHOOK: Lineage: u1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u2 as select key, value from src order by key limit 3
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u2
+POSTHOOK: Lineage: u2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE u3 as select key, value from src order by key desc limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@u3
+POSTHOOK: Lineage: u3.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: u3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from u1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@u1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out b/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
index 46527d6..578205e 100644
--- a/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
+++ b/ql/src/test/results/clientpositive/tez/union_fast_stats.q.out
@@ -28,6 +28,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -38,6 +50,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -48,6 +72,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -58,6 +94,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -88,6 +136,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: desc formatted small_alltypesorc_a
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@small_alltypesorc_a
@@ -301,6 +361,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -311,6 +383,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -321,6 +405,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -331,6 +427,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc_a stored as orc as select * from
 (select * from (select * from small_alltypesorc1a) sq1
  union all
@@ -361,6 +469,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: desc formatted small_alltypesorc_a
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out b/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
index 66cda5a..d8f9c8b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_columns.q.out
@@ -48,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tsint_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TSINT
+POSTHOOK: Lineage: tsint.csint SIMPLE [(tsint_txt)tsint_txt.FieldSchema(name:csint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: tsint.rnum SIMPLE [(tsint_txt)tsint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tsint_txt.rnum	tsint_txt.csint
 PREHOOK: query: create table TINT stored as orc AS SELECT * FROM TINT_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
@@ -59,6 +61,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tint_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TINT
+POSTHOOK: Lineage: tint.cint SIMPLE [(tint_txt)tint_txt.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: tint.rnum SIMPLE [(tint_txt)tint_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 tint_txt.rnum	tint_txt.cint
 Warning: Map Join MAPJOIN[11][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: -- We DO NOT expect the following to be vectorized because the BETWEEN range expressions

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
index fb88d57..4ae687e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_date_test
+POSTHOOK: Lineage: decimal_date_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out b/ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out
index 7f1f1c1..4ee8150 100644
--- a/ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_char_mapjoin1.q.out
@@ -98,6 +98,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_vc1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_vc1_orc
+POSTHOOK: Lineage: char_join1_vc1_orc.c1 SIMPLE [(char_join1_vc1)char_join1_vc1.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_vc1_orc.c2 SIMPLE [(char_join1_vc1)char_join1_vc1.FieldSchema(name:c2, type:char(10), comment:null), ]
 PREHOOK: query: create table char_join1_vc2_orc stored as orc as select * from char_join1_vc2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@char_join1_vc2
@@ -108,6 +110,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_vc2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_vc2_orc
+POSTHOOK: Lineage: char_join1_vc2_orc.c1 SIMPLE [(char_join1_vc2)char_join1_vc2.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_vc2_orc.c2 SIMPLE [(char_join1_vc2)char_join1_vc2.FieldSchema(name:c2, type:char(20), comment:null), ]
 PREHOOK: query: create table char_join1_str_orc stored as orc as select * from char_join1_str
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@char_join1_str
@@ -118,6 +122,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@char_join1_str
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@char_join1_str_orc
+POSTHOOK: Lineage: char_join1_str_orc.c1 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: char_join1_str_orc.c2 SIMPLE [(char_join1_str)char_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join char with same length char
 explain select * from char_join1_vc1_orc a join char_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
index 4ed0a0f..7e6638e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
@@ -32,6 +32,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL
+POSTHOOK: Lineage: decimal.dec SIMPLE [(decimal_txt)decimal_txt.FieldSchema(name:dec, type:decimal(10,0), comment:null), ]
 PREHOOK: query: EXPLAIN
 SELECT dec FROM `DECIMAL` order by dec
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
index eea91bb..537d568 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out
@@ -38,6 +38,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_3_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_3
+POSTHOOK: Lineage: decimal_3.key SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:key, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: decimal_3.value SIMPLE [(decimal_3_txt)decimal_3_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@decimal_3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
index e0ccbc6..15c9757 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out
@@ -258,6 +258,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@decimal_6_1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@DECIMAL_6_3
+POSTHOOK: Lineage: decimal_6_3.k EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:key, type:decimal(10,5), comment:null), ]
+POSTHOOK: Lineage: decimal_6_3.v EXPRESSION [(decimal_6_1)decimal_6_1.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: desc DECIMAL_6_3
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@decimal_6_3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
index 7d8823c..efc77ff 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out
@@ -16,6 +16,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_vgby
+POSTHOOK: Lineage: decimal_vgby.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
index 1b0320c..2976cb5 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
@@ -12,6 +12,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdecimal1 + cdecimal2 as c1, cdecimal1 - (2*cdecimal2) as c2, ((cdecimal1+2.34)/cdecimal2) as c3, (cdecimal1 * (cdecimal2/3.4)) as c4, cdecimal1 % 10 as c5, CAST(cdecimal1 AS INT) as c6, CAST(cdecimal2 AS SMALLINT) as c7, CAST(cdecimal2 AS TINYINT) as c8, CAST(cdecimal1 AS BIGINT) as c9, CAST (cdecimal1 AS BOOLEAN) as c10, CAST(cdecimal2 AS DOUBLE) as c11, CAST(cdecimal1 AS FLOAT) as c12, CAST(cdecimal2 AS STRING) as c13, CAST(cdecimal1 AS TIMESTAMP) as c14 FROM decimal_test WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678 AND cdecimal2 != 0 AND cdecimal2 > 1000 AND cdouble IS NOT NULL
 ORDER BY c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14
 LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out
index 5e85b63..cf9a71e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_math_funcs.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end.
 
 explain 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out b/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
index 3b97296..86c7306 100644
--- a/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_grouping_sets.q.out
@@ -98,6 +98,35 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@store_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@store
+POSTHOOK: Lineage: store.s_city SIMPLE [(store_txt)store_txt.FieldSchema(name:s_city, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_closed_date_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_closed_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_company_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_company_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_company_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_country SIMPLE [(store_txt)store_txt.FieldSchema(name:s_country, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_county SIMPLE [(store_txt)store_txt.FieldSchema(name:s_county, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_division_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_division_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_division_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_floor_space SIMPLE [(store_txt)store_txt.FieldSchema(name:s_floor_space, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_geography_class SIMPLE [(store_txt)store_txt.FieldSchema(name:s_geography_class, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_gmt_offset SIMPLE [(store_txt)store_txt.FieldSchema(name:s_gmt_offset, type:decimal(5,2), comment:null), ]
+POSTHOOK: Lineage: store.s_hours SIMPLE [(store_txt)store_txt.FieldSchema(name:s_hours, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_manager, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_market_desc SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_desc, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_market_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_id, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_market_manager SIMPLE [(store_txt)store_txt.FieldSchema(name:s_market_manager, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_number_employees SIMPLE [(store_txt)store_txt.FieldSchema(name:s_number_employees, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_rec_end_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_end_date, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_rec_start_date SIMPLE [(store_txt)store_txt.FieldSchema(name:s_rec_start_date, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_state SIMPLE [(store_txt)store_txt.FieldSchema(name:s_state, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_id SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_id, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_store_sk SIMPLE [(store_txt)store_txt.FieldSchema(name:s_store_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: store.s_street_name SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_name, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_street_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_number, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_street_type SIMPLE [(store_txt)store_txt.FieldSchema(name:s_street_type, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_suite_number SIMPLE [(store_txt)store_txt.FieldSchema(name:s_suite_number, type:string, comment:null), ]
+POSTHOOK: Lineage: store.s_tax_precentage SIMPLE [(store_txt)store_txt.FieldSchema(name:s_tax_precentage, type:decimal(5,2), comment:null), ]
+POSTHOOK: Lineage: store.s_zip SIMPLE [(store_txt)store_txt.FieldSchema(name:s_zip, type:string, comment:null), ]
 PREHOOK: query: explain
 select s_store_id
  from store


[44/51] [abbrv] hive git commit: HIVE-12995 : LLAP: Synthetic file ids need collision checks (Sergey Shelukhin, reviewed by Gopal V)

Posted by jd...@apache.org.
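
Context for the hunks below: the numeric Long fileId used to address ORC data in the cache becomes an opaque Object fileKey, so a synthetic id can be a full object compared with equals(), and two files whose hash-derived ids collide are still told apart. A minimal sketch of what such a composite key could look like, assuming path, length, and modification time identify a file (the class and all names here are illustrative; the patch itself only widens the key type):

import java.util.Objects;

// Illustrative composite file key; not shipped by the patch, which only
// changes the cache key type from Long to Object so keys like this fit.
final class IllustrativeFileKey {
  private final String path;
  private final long length;
  private final long modificationTime;

  IllustrativeFileKey(String path, long length, long modificationTime) {
    this.path = path;
    this.length = length;
    this.modificationTime = modificationTime;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof IllustrativeFileKey)) return false;
    IllustrativeFileKey k = (IllustrativeFileKey) o;
    // Comparing every field is what makes hash collisions harmless.
    return length == k.length && modificationTime == k.modificationTime
        && path.equals(k.path);
  }

  @Override
  public int hashCode() {
    return Objects.hash(path, length, modificationTime);
  }
}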
http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 6cec80e..29b51ec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -95,7 +95,7 @@ class EncodedReaderImpl implements EncodedReader {
           return tcc;
         }
       };
-  private final Long fileId;
+  private final Object fileKey;
   private final DataReader dataReader;
   private boolean isDataReaderOpen = false;
   private final CompressionCodec codec;
@@ -106,10 +106,10 @@ class EncodedReaderImpl implements EncodedReader {
   private ByteBufferAllocatorPool pool;
   private boolean isDebugTracingEnabled;
 
-  public EncodedReaderImpl(Long fileId, List<OrcProto.Type> types, CompressionCodec codec,
+  public EncodedReaderImpl(Object fileKey, List<OrcProto.Type> types, CompressionCodec codec,
       int bufferSize, long strideRate, DataCache cache, DataReader dataReader, PoolFactory pf)
           throws IOException {
-    this.fileId = fileId;
+    this.fileKey = fileKey;
     this.codec = codec;
     this.types = types;
     this.bufferSize = bufferSize;
@@ -271,14 +271,13 @@ class EncodedReaderImpl implements EncodedReader {
       offset += length;
     }
 
-    boolean hasFileId = this.fileId != null;
-    long fileId = hasFileId ? this.fileId : 0;
+    boolean hasFileId = this.fileKey != null;
     if (listToRead.get() == null) {
       // No data to read for this stripe. Check if we have some included index-only columns.
       // TODO: there may be a bug here. Could there be partial RG filtering on index-only column?
       if (hasIndexOnlyCols && (includedRgs == null)) {
         OrcEncodedColumnBatch ecb = POOLS.ecbPool.take();
-        ecb.init(fileId, stripeIx, OrcEncodedColumnBatch.ALL_RGS, colRgs.length);
+        ecb.init(fileKey, stripeIx, OrcEncodedColumnBatch.ALL_RGS, colRgs.length);
         consumer.consumeData(ecb);
       } else {
         LOG.warn("Nothing to read for stripe [" + stripe + "]");
@@ -289,14 +288,14 @@ class EncodedReaderImpl implements EncodedReader {
     // 2. Now, read all of the ranges from cache or disk.
     DiskRangeList.MutateHelper toRead = new DiskRangeList.MutateHelper(listToRead.get());
     if (isDebugTracingEnabled && LOG.isInfoEnabled()) {
-      LOG.info("Resulting disk ranges to read (file " + fileId + "): "
+      LOG.info("Resulting disk ranges to read (file " + fileKey + "): "
           + RecordReaderUtils.stringifyDiskRanges(toRead.next));
     }
     BooleanRef isAllInCache = new BooleanRef();
     if (hasFileId) {
-      cache.getFileData(fileId, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
+      cache.getFileData(fileKey, toRead.next, stripeOffset, CC_FACTORY, isAllInCache);
       if (isDebugTracingEnabled && LOG.isInfoEnabled()) {
-        LOG.info("Disk ranges after cache (file " + fileId + ", base offset " + stripeOffset
+        LOG.info("Disk ranges after cache (file " + fileKey + ", base offset " + stripeOffset
             + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
     }
@@ -324,7 +323,7 @@ class EncodedReaderImpl implements EncodedReader {
         }
       }
       if (isDebugTracingEnabled) {
-        LOG.info("Disk ranges after pre-read (file " + fileId + ", base offset "
+        LOG.info("Disk ranges after pre-read (file " + fileKey + ", base offset "
             + stripeOffset + "): " + RecordReaderUtils.stringifyDiskRanges(toRead.next));
       }
       iter = toRead.next; // Reset the iter to start.
@@ -337,13 +336,14 @@ class EncodedReaderImpl implements EncodedReader {
       boolean isLastRg = rgIx == rgCount - 1;
       // Create the batch we will use to return data for this RG.
       OrcEncodedColumnBatch ecb = POOLS.ecbPool.take();
-      ecb.init(fileId, stripeIx, rgIx, colRgs.length);
+      ecb.init(fileKey, stripeIx, rgIx, colRgs.length);
       boolean isRGSelected = true;
       for (int colIxMod = 0; colIxMod < colRgs.length; ++colIxMod) {
+        // TODO: simplify this now that high-level cache has been removed.
         if (colRgs[colIxMod] != null && !colRgs[colIxMod][rgIx]) {
           // RG x col filtered.
           isRGSelected = false;
-          continue; // TODO: this would be invalid with HL cache, where RG x col can be excluded.
+          continue;
         }
         ColumnReadContext ctx = colCtxs[colIxMod];
         OrcProto.RowIndexEntry index = ctx.rowIndex.getEntry(rgIx),
@@ -663,8 +663,8 @@ class EncodedReaderImpl implements EncodedReader {
     }
 
     // 6. Finally, put uncompressed data to cache.
-    if (fileId != null) {
-      long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
+    if (fileKey != null) {
+      long[] collisionMask = cache.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
       processCacheCollisions(collisionMask, toDecompress, targetBuffers, csd.getCacheBuffers());
     }
 
@@ -914,8 +914,8 @@ class EncodedReaderImpl implements EncodedReader {
     }
 
     // 6. Finally, put uncompressed data to cache.
-    if (fileId != null) {
-      long[] collisionMask = cache.putFileData(fileId, cacheKeys, targetBuffers, baseOffset);
+    if (fileKey != null) {
+      long[] collisionMask = cache.putFileData(fileKey, cacheKeys, targetBuffers, baseOffset);
       processCacheCollisions(collisionMask, toCache, targetBuffers, null);
     }
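
Per the DataCache javadoc further down, putFileData returns null when every chunk was stored, or a bitmask marking the chunks that were not put because another caller cached the same data first. A hedged helper showing how one range's bit could be tested, assuming bit i of the packed long[] corresponds to ranges[i] (the exact packing is not spelled out in these hunks):

// Hedged sketch: true if ranges[rangeIx] collided and was not stored.
// The bit layout across the long[] words is an assumption.
static boolean collided(long[] collisionMask, int rangeIx) {
  if (collisionMask == null) {
    return false; // null means every chunk was put successfully
  }
  return (collisionMask[rangeIx >>> 6] & (1L << (rangeIx & 63))) != 0;
}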
 

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcBatchKey.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcBatchKey.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcBatchKey.java
index da673a5a..9bb171e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcBatchKey.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcBatchKey.java
@@ -19,28 +19,33 @@
 package org.apache.hadoop.hive.ql.io.orc.encoded;
 
 public class OrcBatchKey {
-  public long file;
+  public Object fileKey;
   public int stripeIx, rgIx;
 
+  // Make sure all the numbers are converted to long for size estimation.
   public OrcBatchKey(long file, int stripeIx, int rgIx) {
     set(file, stripeIx, rgIx);
   }
 
-  public void set(long file, int stripeIx, int rgIx) {
-    this.file = file;
+  public OrcBatchKey(Object file, int stripeIx, int rgIx) {
+    set(file, stripeIx, rgIx);
+  }
+
+  public void set(Object file, int stripeIx, int rgIx) {
+    this.fileKey = file;
     this.stripeIx = stripeIx;
     this.rgIx = rgIx;
   }
 
   @Override
   public String toString() {
-    return "[" + file + ", stripe " + stripeIx + ", rgIx " + rgIx + "]";
+    return "[" + fileKey + ", stripe " + stripeIx + ", rgIx " + rgIx + "]";
   }
 
   @Override
   public int hashCode() {
     final int prime = 31;
-    int result = prime + (int)(file ^ (file >>> 32));
+    int result = prime + fileKey.hashCode();
     return (prime * result + rgIx) * prime + stripeIx;
   }
 
@@ -50,11 +55,12 @@ public class OrcBatchKey {
     if (!(obj instanceof OrcBatchKey)) return false;
     OrcBatchKey other = (OrcBatchKey)obj;
     // Strings are interned and can thus be compared like this.
-    return stripeIx == other.stripeIx && rgIx == other.rgIx && file == other.file;
+    return stripeIx == other.stripeIx && rgIx == other.rgIx
+        && (fileKey == null ? (other.fileKey == null) : fileKey.equals(other.fileKey));
   }
 
   @Override
   public OrcBatchKey clone() throws CloneNotSupportedException {
-    return new OrcBatchKey(file, stripeIx, rgIx);
+    return new OrcBatchKey(fileKey, stripeIx, rgIx);
   }
 }
\ No newline at end of file
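
Two details in this hunk: equals() now compares fileKey null-safely, but the new hashCode() still calls fileKey.hashCode() directly, so a null key would pass equals() yet throw at hash time. A compact sketch of fully null-safe variants via java.util.Objects (illustrative only, not what the patch ships):

import java.util.Objects;

final class KeyOpsSketch {
  // True iff both keys are null, or a.equals(b); mirrors the patched equals().
  static boolean sameFileKey(Object a, Object b) {
    return Objects.equals(a, b);
  }

  // Unlike the patched hashCode(), Objects.hash tolerates a null fileKey.
  static int keyHash(Object fileKey, int stripeIx, int rgIx) {
    return Objects.hash(fileKey, stripeIx, rgIx);
  }
}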

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcCacheKey.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcCacheKey.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcCacheKey.java
deleted file mode 100644
index 9a0158e..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/OrcCacheKey.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.io.orc.encoded;
-
-public class OrcCacheKey extends OrcBatchKey {
-  public int colIx;
-
-  public OrcCacheKey(long file, int stripeIx, int rgIx, int colIx) {
-    super(file, stripeIx, rgIx);
-    this.colIx = colIx;
-  }
-
-  public OrcCacheKey(OrcBatchKey batchKey, int colIx) {
-    super(batchKey.file, batchKey.stripeIx, batchKey.rgIx);
-    this.colIx = colIx;
-  }
-
-  public OrcBatchKey copyToPureBatchKey() {
-    return new OrcBatchKey(file, stripeIx, rgIx);
-  }
-
-  @Override
-  public String toString() {
-    return "[" + file + ", stripe " + stripeIx + ", rgIx " + rgIx + ", rgIx " + colIx + "]";
-  }
-
-  @Override
-  public int hashCode() {
-    final int prime = 31;
-    return super.hashCode() * prime + colIx;
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj) return true;
-    if (!(obj instanceof OrcCacheKey)) return false;
-    OrcCacheKey other = (OrcCacheKey)obj;
-    // Strings are interned and can thus be compared like this.
-    return stripeIx == other.stripeIx && rgIx == other.rgIx
-        && file == other.file && other.colIx == colIx;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
index 246ead6..4405232 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/Reader.java
@@ -49,11 +49,11 @@ public interface Reader extends org.apache.hadoop.hive.ql.io.orc.Reader {
      * We assume the sort will stay the same for backward compat.
      */
     public static final int MAX_DATA_STREAMS = OrcProto.Stream.Kind.ROW_INDEX.getNumber();
-    public void init(long fileId, int stripeIx, int rgIx, int columnCount) {
+    public void init(Object fileKey, int stripeIx, int rgIx, int columnCount) {
       if (batchKey == null) {
-        batchKey = new OrcBatchKey(fileId, stripeIx, rgIx);
+        batchKey = new OrcBatchKey(fileKey, stripeIx, rgIx);
       } else {
-        batchKey.set(fileId, stripeIx, rgIx);
+        batchKey.set(fileKey, stripeIx, rgIx);
       }
       resetColumnArrays(columnCount);
     }
@@ -61,12 +61,12 @@ public interface Reader extends org.apache.hadoop.hive.ql.io.orc.Reader {
 
   /**
    * Creates the encoded reader.
-   * @param fileId File ID to read, to use for cache lookups and such.
+   * @param fileKey File ID to read, to use for cache lookups and such.
    * @param dataCache Data cache to use for cache lookups.
    * @param dataReader Data reader to read data not found in cache (from disk, HDFS, and such).
    * @param pf Pool factory to create object pools.
    * @return The reader.
    */
   EncodedReader encodedReader(
-      Long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException;
+      Object fileKey, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
index b0ac503..4856fb3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/ReaderImpl.java
@@ -34,8 +34,8 @@ class ReaderImpl extends org.apache.hadoop.hive.ql.io.orc.ReaderImpl implements
 
   @Override
   public EncodedReader encodedReader(
-      Long fileId, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException {
-    return new EncodedReaderImpl(fileId, types,
+      Object fileKey, DataCache dataCache, DataReader dataReader, PoolFactory pf) throws IOException {
+    return new EncodedReaderImpl(fileKey, types,
         codec, bufferSize, rowIndexStride, dataCache, dataReader, pf);
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
index 9ac53af..cef765c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/StreamUtils.java
@@ -35,7 +35,6 @@ public class StreamUtils {
    * Create SettableUncompressedStream from stream buffer.
    *
    * @param streamName - stream name
-   * @param fileId - file id
    * @param streamBuffer - stream buffer
    * @return - SettableUncompressedStream
    * @throws IOException

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index 4fafe8c..f824e18 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -622,8 +622,8 @@ public class TestInputOutputFormat {
   public SplitStrategy<?> createOrCombineStrategy(OrcInputFormat.Context context,
       MockFileSystem fs, String path, OrcInputFormat.CombinedCtx combineCtx) throws IOException {
     OrcInputFormat.AcidDirInfo adi = createAdi(context, fs, path);
-    return OrcInputFormat.determineSplitStrategy(
-        combineCtx, context, adi.fs, adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, null);
+    return OrcInputFormat.determineSplitStrategy(combineCtx, context,
+        adi.fs, adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, null, true);
   }
 
   public OrcInputFormat.AcidDirInfo createAdi(
@@ -636,7 +636,7 @@ public class TestInputOutputFormat {
       OrcInputFormat.Context context, OrcInputFormat.FileGenerator gen) throws IOException {
     OrcInputFormat.AcidDirInfo adi = gen.call();
     return OrcInputFormat.determineSplitStrategy(
-        null, context, adi.fs, adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, null);
+        null, context, adi.fs, adi.splitPath, adi.acidInfo, adi.baseOrOriginalFiles, null, true);
   }
 
   public static class MockBlock {
@@ -1114,7 +1114,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
             AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
-            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null);
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     OrcSplit result = splitter.createSplit(0, 200, null);
     assertEquals(0, result.getStart());
     assertEquals(200, result.getLength());
@@ -1155,7 +1155,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
             AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
-            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null);
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     List<OrcSplit> results = splitter.call();
     OrcSplit result = results.get(0);
     assertEquals(3, result.getStart());
@@ -1178,7 +1178,7 @@ public class TestInputOutputFormat {
     context = new OrcInputFormat.Context(conf);
     splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
       AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
-        new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null);
+        new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     results = splitter.call();
     for(int i=0; i < stripeSizes.length; ++i) {
       assertEquals("checking stripe " + i + " size",
@@ -1206,7 +1206,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
             fs.getFileStatus(new Path("/a/file")), null, true,
-            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null);
+            new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     List<OrcSplit> results = splitter.call();
     OrcSplit result = results.get(0);
     assertEquals(3, results.size());
@@ -1229,7 +1229,7 @@ public class TestInputOutputFormat {
     splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
         fs.getFileStatus(new Path("/a/file")), null, true,
         new ArrayList<AcidInputFormat.DeltaMetaData>(),
-        true, null, null), null);
+        true, null, null), null, true);
     results = splitter.call();
     assertEquals(5, results.size());
     for (int i = 0; i < stripeSizes.length; ++i) {
@@ -1249,7 +1249,7 @@ public class TestInputOutputFormat {
     splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
         fs.getFileStatus(new Path("/a/file")), null, true,
         new ArrayList<AcidInputFormat.DeltaMetaData>(),
-        true, null, null), null);
+        true, null, null), null, true);
     results = splitter.call();
     assertEquals(1, results.size());
     result = results.get(0);
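
Every updated call in this test gains a trailing boolean whose parameter name is not visible in these hunks; given the patch subject, it presumably toggles whether synthetic file ids may be generated, and the tests always pass true. A hedged sketch of the call shape, mirroring createOrCombineStrategy above (the flag name is an assumption):

// "allowSyntheticFileIds" is an assumed name for the new trailing boolean.
boolean allowSyntheticFileIds = true;
SplitStrategy<?> strategy = OrcInputFormat.determineSplitStrategy(
    null, context, adi.fs, adi.splitPath, adi.acidInfo,
    adi.baseOrOriginalFiles, null, allowSyntheticFileIds);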

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java b/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
index 9046589..1273588 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/io/DataCache.java
@@ -57,7 +57,7 @@ public interface DataCache {
    * @param gotAllData An out param - whether all the requested data was found in cache.
    * @return The new or modified list of DiskRange-s, where some ranges may contain cached data.
    */
-  DiskRangeList getFileData(long fileId, DiskRangeList range, long baseOffset,
+  DiskRangeList getFileData(Object fileKey, DiskRangeList range, long baseOffset,
       DiskRangeListFactory factory, BooleanRef gotAllData);
 
   /**
@@ -79,7 +79,7 @@ public interface DataCache {
    * @return null if all data was put; bitmask indicating which chunks were not put otherwise;
    *         the replacement chunks from cache are updated directly in the array.
    */
-  long[] putFileData(long fileId, DiskRange[] ranges, MemoryBuffer[] data, long baseOffset);
+  long[] putFileData(Object fileKey, DiskRange[] ranges, MemoryBuffer[] data, long baseOffset);
 
   /**
    * Releases the buffer returned by getFileData/provided to putFileData back to cache.
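
A hedged usage sketch against the widened interface. The import paths follow the storage-api layout in the diff header; the nesting of DiskRangeListFactory and BooleanRef inside DataCache, and BooleanRef's public value field, are assumptions based on how the out-param is described above:

import org.apache.hadoop.hive.common.io.DataCache;
import org.apache.hadoop.hive.common.io.DiskRangeList;

final class CacheLookupSketch {
  // Look up cached ranges for an arbitrary file key and report whether a
  // disk read is still needed. Sketch only: needs a concrete cache to run.
  static DiskRangeList lookup(DataCache cache, Object fileKey, DiskRangeList ranges,
      long baseOffset, DataCache.DiskRangeListFactory factory) {
    DataCache.BooleanRef gotAllData = new DataCache.BooleanRef();
    DiskRangeList result = cache.getFileData(fileKey, ranges, baseOffset, factory, gotAllData);
    if (!gotAllData.value) {
      // Some ranges missed the cache; the caller reads them from disk and
      // may offer them back via putFileData(fileKey, ...).
    }
    return result;
  }
}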

http://git-wip-us.apache.org/repos/asf/hive/blob/26b5c7b5/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java b/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
index 3ef7abe..ddba889 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/io/encoded/EncodedColumnBatch.java
@@ -82,13 +82,6 @@ public class EncodedColumnBatch<BatchKey> {
   protected ColumnStreamData[][] columnData;
   /** Column indexes included in the batch. Correspond to columnData elements. */
   protected int[] columnIxs;
-  // TODO: Maybe remove when solving the pooling issue.
-  /** Generation version necessary to sync pooling reuse with the fact that two separate threads
-   * operate on batches - the one that decodes them, and potential separate thread w/a "stop" call
-   * that cleans them up. We don't want the decode thread to use the ECB that was thrown out and
-   * reused, so it remembers the version and checks it after making sure no cleanup thread can ever
-   * get to this ECB anymore. All this sync is ONLY needed because of high level cache code. */
-  public int version = Integer.MIN_VALUE;
 
   public void reset() {
     if (columnData == null) return;
@@ -117,7 +110,7 @@ public class EncodedColumnBatch<BatchKey> {
   }
 
   public BatchKey getBatchKey() {
-    return batchKey;
+    return batchKey; // TODO#: who uses this? can we remove fileId?
   }
 
   public ColumnStreamData[][] getColumnData() {


[21/51] [abbrv] hive git commit: HIVE-12781 : Temporarily disable authorization tests that always fail on Jenkins (Ashutosh Chauhan via Sushanth Sowmyan)

Posted by jd...@apache.org.
HIVE-12781 : Temporarily disable authorization tests that always fail on Jenkins (Ashutosh Chauhan via Sushanth Sowmyan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9e231f2b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9e231f2b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9e231f2b

Branch: refs/heads/llap
Commit: 9e231f2b35cf58437be6c274217147010a28e207
Parents: fdc9caf
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Mar 9 16:11:56 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Mar 9 16:11:56 2016 -0800

----------------------------------------------------------------------
 itests/qtest/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9e231f2b/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index cb68e39..a479557 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -467,7 +467,7 @@
                   templatePath="${basedir}/${hive.path.to.root}/ql/src/test/templates/" template="TestNegativeCliDriver.vm"
                   queryDirectory="${basedir}/${hive.path.to.root}/ql/src/test/queries/clientnegative/"
                   queryFile="${qfile}"
-                  excludeQueryFile="${minimr.query.negative.files}"
+                  excludeQueryFile="${minimr.query.negative.files},authorization_uri_import.q"
                   queryFileRegex="${qfile_regex}"
                   clusterMode="${clustermode}"
                   runDisabled="${run_disabled}"


[10/51] [abbrv] hive git commit: HIVE-11483 : Add encoding and decoding for query string config (Rajat Khandelwal, reviewed by Amareshwari Sriramadasu)

Posted by jd...@apache.org.
HIVE-11483 : Add encoding and decoding for query string config (Rajat Khandelwal, reviewed by Amareshwari Sriramadasu)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/415373bb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/415373bb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/415373bb

Branch: refs/heads/llap
Commit: 415373bbd1c58e7b8f00aa32c0bb7d485d4f91e5
Parents: fe14a90
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Wed Mar 9 14:17:21 2016 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Wed Mar 9 14:17:21 2016 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   | 76 ++++++++++++++++----
 .../apache/hadoop/hive/conf/TestHiveConf.java   | 10 +++
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  5 +-
 .../hive/ql/exec/errors/TaskLogProcessor.java   |  2 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |  2 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |  2 +-
 .../hadoop/hive/ql/session/SessionState.java    |  6 +-
 .../ql/exec/errors/TestTaskLogProcessor.java    |  8 +--
 .../apache/hadoop/hive/ql/hooks/TestHooks.java  |  2 +-
 9 files changed, 85 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 5098851..27a56dd 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -40,12 +40,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.security.auth.login.LoginException;
-import java.io.ByteArrayOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.PrintStream;
+
+import java.io.*;
 import java.net.URL;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -96,6 +95,35 @@ public class HiveConf extends Configuration {
     this.isSparkConfigUpdated = isSparkConfigUpdated;
   }
 
+  public interface EncoderDecoder<K, V> {
+    V encode(K key);
+    K decode(V value);
+  }
+
+  public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
+    private static final String UTF_8 = "UTF-8";
+    @Override
+    public String encode(String key) {
+      try {
+        return URLEncoder.encode(key, UTF_8);
+      } catch (UnsupportedEncodingException e) {
+        return key;
+      }
+    }
+
+    @Override
+    public String decode(String value) {
+      try {
+        return URLDecoder.decode(value, UTF_8);
+      } catch (UnsupportedEncodingException e) {
+        return value;
+      }
+    }
+  }
+  public static class EncoderDecoderFactory {
+    public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
+  }
+
   static {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
     if (classLoader == null) {
@@ -3283,10 +3311,8 @@ public class HiveConf extends Configuration {
 
   public static String getVar(Configuration conf, ConfVars var) {
     assert (var.valClass == String.class) : var.varname;
-    if (var.altName != null) {
-      return conf.get(var.varname, conf.get(var.altName, var.defaultStrVal));
-    }
-    return conf.get(var.varname, var.defaultStrVal);
+    return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal))
+      : conf.get(var.varname, var.defaultStrVal);
   }
 
   public static String getTrimmedVar(Configuration conf, ConfVars var) {
@@ -3309,10 +3335,13 @@ public class HiveConf extends Configuration {
   }
 
   public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
-    if (var.altName != null) {
-      return conf.get(var.varname, conf.get(var.altName, defaultVal));
-    }
-    return conf.get(var.varname, defaultVal);
+    String ret = var.altName != null ? conf.get(var.varname, conf.get(var.altName, defaultVal))
+      : conf.get(var.varname, defaultVal);
+    return ret;
+  }
+
+  public static String getVar(Configuration conf, ConfVars var, EncoderDecoder<String, String> encoderDecoder) {
+    return encoderDecoder.decode(getVar(conf, var));
   }
 
   public String getLogIdVar(String defaultValue) {
@@ -3333,6 +3362,10 @@ public class HiveConf extends Configuration {
     assert (var.valClass == String.class) : var.varname;
     conf.set(var.varname, val);
   }
+  public static void setVar(Configuration conf, ConfVars var, String val,
+    EncoderDecoder<String, String> encoderDecoder) {
+    setVar(conf, var, encoderDecoder.encode(val));
+  }
 
   public static ConfVars getConfVars(String name) {
     return vars.get(name);
@@ -3350,6 +3383,21 @@ public class HiveConf extends Configuration {
     setVar(this, var, val);
   }
 
+  public String getQueryString() {
+    return getQueryString(this);
+  }
+
+  public static String getQueryString(Configuration conf) {
+    return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER);
+  }
+
+  public void setQueryString(String query) {
+    setQueryString(this, query);
+  }
+
+  public static void setQueryString(Configuration conf, String query) {
+    setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER);
+  }
   public void logVars(PrintStream ps) {
     for (ConfVars one : ConfVars.values()) {
       ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : ""));
@@ -3904,7 +3952,7 @@ public class HiveConf extends Configuration {
     }
 
     private static boolean isAllowed(Configuration conf, ConfVars setting) {
-      String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, null);
+      String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null);
       return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting);
     }
   }
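
For reference, a minimal stand-alone sketch of the encode/decode round trip the HiveConf hunks above introduce. Everything below is illustrative (the interface and class names mirror the diff but are not part of the commit); it assumes only java.net.URLEncoder/URLDecoder:

  import java.io.UnsupportedEncodingException;
  import java.net.URLDecoder;
  import java.net.URLEncoder;

  interface EncoderDecoder<K, V> {
    V encode(K key);
    K decode(V value);
  }

  class UrlCodec implements EncoderDecoder<String, String> {
    private static final String UTF_8 = "UTF-8";
    @Override
    public String encode(String key) {
      try {
        return URLEncoder.encode(key, UTF_8);
      } catch (UnsupportedEncodingException e) {
        return key;  // UTF-8 is mandated by the JDK, so this fallback is unreachable in practice
      }
    }
    @Override
    public String decode(String value) {
      try {
        return URLDecoder.decode(value, UTF_8);
      } catch (UnsupportedEncodingException e) {
        return value;
      }
    }
  }

  public class RoundTripDemo {
    public static void main(String[] args) {
      EncoderDecoder<String, String> codec = new UrlCodec();
      String query = "select blah, '\u0001' from random_table";
      String stored = codec.encode(query);  // the control char becomes %01, spaces become +
      System.out.println(query.equals(codec.decode(stored)));  // true
    }
  }

The point of the indirection is that hive.query.string may contain characters (e.g. \u0001 used as a field delimiter) that are awkward to carry through a Hadoop Configuration, so setQueryString() encodes on write and getQueryString() decodes on read.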

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
index 365d500..f88573f 100644
--- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
+++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
@@ -25,6 +25,8 @@ import org.apache.hive.common.util.HiveTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
 import java.util.concurrent.TimeUnit;
 
 
@@ -156,4 +158,12 @@ public class TestHiveConf {
     conf.setSparkConfigUpdated(false);
     Assert.assertFalse(conf.getSparkConfigUpdated());
   }
+  @Test
+  public void testEncodingDecoding() throws UnsupportedEncodingException {
+    HiveConf conf = new HiveConf();
+    String query = "select blah, '\u0001' from random_table";
+    conf.setQueryString(query);
+    Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), conf.get(ConfVars.HIVEQUERYSTRING.varname));
+    Assert.assertEquals(query, conf.getQueryString());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index 3253146..b50c5a2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -86,7 +86,6 @@ import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
-import org.apache.hadoop.hive.ql.parse.CalcitePlanner;
 import org.apache.hadoop.hive.ql.parse.ColumnAccessInfo;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
@@ -499,7 +498,7 @@ public class Driver implements CommandProcessor {
       plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
         SessionState.get().getHiveOperation(), schema);
 
-      conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);
+      conf.setQueryString(queryStr);
 
       conf.set("mapreduce.workflow.id", "hive_" + queryId);
       conf.set("mapreduce.workflow.name", queryStr);
@@ -1484,7 +1483,7 @@ public class Driver implements CommandProcessor {
     String queryId = plan.getQueryId();
     // Get the query string from the conf file as the compileInternal() method might
     // hide sensitive information during query redaction.
-    String queryStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYSTRING);
+    String queryStr = conf.getQueryString();
 
     maxthreads = HiveConf.getIntVar(conf, HiveConf.ConfVars.EXECPARALLETHREADNUMBER);
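
The comment in the hunk above is the key constraint: compileInternal() may rewrite the query through a redaction hook before storing it, so the execution path must re-read the stored value rather than trust the original string. A toy sketch of that hand-off (the map and method names below are stand-ins, not the Driver's real internals):

  import java.util.HashMap;
  import java.util.Map;

  public class RedactionHandoffDemo {
    interface Redactor {  // stand-in for Hive's query-redactor hook
      String redactQuery(String query);
    }

    private final Map<String, String> conf = new HashMap<>();

    void compile(String query, Redactor redactor) {
      // compile stores the possibly-redacted text (the Driver does this via conf.setQueryString)
      conf.put("hive.query.string", redactor.redactQuery(query));
    }

    String execute() {
      // execute re-reads from the conf instead of keeping the caller's string
      return conf.get("hive.query.string");
    }

    public static void main(String[] args) {
      RedactionHandoffDemo d = new RedactionHandoffDemo();
      d.compile("select 'XXX' from t1", q -> q.replace("XXX", "AAA"));
      System.out.println(d.execute());  // select 'AAA' from t1, mirroring TestHooks below
    }
  }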
 

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/TaskLogProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/TaskLogProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/TaskLogProcessor.java
index 68123d4..b788d8f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/TaskLogProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/TaskLogProcessor.java
@@ -53,7 +53,7 @@ public class TaskLogProcessor {
   private final String query;
 
   public TaskLogProcessor(JobConf conf) {
-    query = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYSTRING);
+    query = HiveConf.getQueryString(conf);
 
     heuristics.put(new ScriptErrorHeuristic(), new HeuristicStats());
     heuristics.put(new MapAggrMemErrorHeuristic(), new HeuristicStats());

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index b184b4e..ce020a5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -568,7 +568,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
     // Intentionally overwrites anything the user may have put here
     conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
 
-    if (HiveConf.getVar(conf, ConfVars.HIVE_CURRENT_DATABASE, null) == null) {
+    if (HiveConf.getVar(conf, ConfVars.HIVE_CURRENT_DATABASE, (String)null) == null) {
       HiveConf.setVar(conf, ConfVars.HIVE_CURRENT_DATABASE, getCurrentDB());
     }
   }
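
The (String)null casts here and in the HiveConf, DagUtils, and SessionState hunks exist because HiveConf now also declares getVar(Configuration, ConfVars, EncoderDecoder<String, String>): a bare null matches both overloads and no longer compiles. A minimal illustration (the names are placeholders, not Hive APIs):

  public class OverloadDemo {
    interface EncoderDecoder<K, V> { }

    static String getVar(String name, String defaultVal) {
      return "picked the String overload";
    }
    static String getVar(String name, EncoderDecoder<String, String> codec) {
      return "picked the EncoderDecoder overload";
    }

    public static void main(String[] args) {
      // getVar("x", null);  // javac error: reference to getVar is ambiguous
      System.out.println(getVar("x", (String) null));  // compiles; the String overload wins
    }
  }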

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
index 473dbd6..79da860 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java
@@ -904,7 +904,7 @@ public class DagUtils {
 
   public FileStatus getHiveJarDirectory(Configuration conf) throws IOException, LoginException {
     FileStatus fstatus = null;
-    String hdfsDirPathStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_JAR_DIRECTORY, null);
+    String hdfsDirPathStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_JAR_DIRECTORY, (String)null);
     if (hdfsDirPathStr != null) {
       LOG.info("Hive jar directory is " + hdfsDirPathStr);
       fstatus = validateTargetDir(new Path(hdfsDirPathStr), conf);

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 109cd8c..78bbb1f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -373,11 +373,11 @@ public class SessionState {
   }
 
   public void setCmd(String cmdString) {
-    conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, cmdString);
+    conf.setQueryString(cmdString);
   }
 
   public String getCmd() {
-    return (conf.getVar(HiveConf.ConfVars.HIVEQUERYSTRING));
+    return (conf.getQueryString());
   }
 
   public String getQueryId() {
@@ -1642,7 +1642,7 @@ public class SessionState {
     // Provide a facility to set current timestamp during tests
     if (conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
       String overrideTimestampString =
-          HiveConf.getVar(conf, HiveConf.ConfVars.HIVETESTCURRENTTIMESTAMP, null);
+          HiveConf.getVar(conf, HiveConf.ConfVars.HIVETESTCURRENTTIMESTAMP, (String)null);
       if (overrideTimestampString != null && overrideTimestampString.length() > 0) {
         queryCurrentTimestamp = Timestamp.valueOf(overrideTimestampString);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java
index 67a86a6..477479d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java
@@ -98,7 +98,7 @@ public class TestTaskLogProcessor {
   @Test
   public void testGetStackTraces() throws Exception {
     JobConf jobConf = new JobConf();
-    jobConf.set(HiveConf.ConfVars.HIVEQUERYSTRING.varname, "select * from foo group by moo;");
+    HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
 
     final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);
 
@@ -150,7 +150,7 @@ public class TestTaskLogProcessor {
   @Test
   public void testScriptErrorHeuristic() throws Exception {
     JobConf jobConf = new JobConf();
-    jobConf.set(HiveConf.ConfVars.HIVEQUERYSTRING.varname, "select * from foo group by moo;");
+    HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
 
     final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);
     
@@ -177,7 +177,7 @@ public class TestTaskLogProcessor {
   @Test
   public void testDataCorruptErrorHeuristic() throws Exception {
     JobConf jobConf = new JobConf();
-    jobConf.set(HiveConf.ConfVars.HIVEQUERYSTRING.varname, "select * from foo group by moo;");
+    HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
 
     final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);
     
@@ -210,7 +210,7 @@ public class TestTaskLogProcessor {
   @Test
   public void testMapAggrMemErrorHeuristic() throws Exception {
     JobConf jobConf = new JobConf();
-    jobConf.set(HiveConf.ConfVars.HIVEQUERYSTRING.varname, "select * from foo group by moo;");
+    HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
 
     final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/415373bb/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
index 8d27762..4c14d8b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java
@@ -73,7 +73,7 @@ public class TestHooks {
     Driver driver = createDriver(conf);
     int ret = driver.compile("select 'XXX' from t1");
     assertEquals("Checking command success", 0, ret);
-    assertEquals("select 'AAA' from t1", HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYSTRING));
+    assertEquals("select 'AAA' from t1", conf.getQueryString());
   }
 
   public static class SimpleQueryRedactor extends Redactor {


[51/51] [abbrv] hive git commit: HIVE-13304: Merge master into llap branch

Posted by jd...@apache.org.
HIVE-13304: Merge master into llap branch


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2945c3b2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2945c3b2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2945c3b2

Branch: refs/heads/llap
Commit: 2945c3b2d04304326b48bdf0e646fb36be148fe5
Parents: 81b26df a17122f
Author: Jason Dere <jd...@hortonworks.com>
Authored: Thu Mar 17 15:40:06 2016 -0700
Committer: Jason Dere <jd...@hortonworks.com>
Committed: Thu Mar 17 15:40:06 2016 -0700

----------------------------------------------------------------------
 beeline/pom.xml                                 |    22 +
 .../org/apache/hadoop/hive/cli/CliDriver.java   |     7 +-
 common/pom.xml                                  |    19 +
 .../org/apache/hadoop/hive/conf/HiveConf.java   |    88 +-
 .../hive/common/metrics/MetricsTestUtils.java   |     9 +
 .../metrics/metrics2/TestCodahaleMetrics.java   |    26 +-
 .../apache/hadoop/hive/conf/TestHiveConf.java   |    10 +
 data/files/dec_old.avro                         |   Bin 0 -> 331 bytes
 data/files/over4_null                           |     5 +
 .../hive/hcatalog/cli/TestSemanticAnalysis.java |    15 +
 .../org/apache/hive/minikdc/MiniHiveKdc.java    |    15 +
 .../hive/minikdc/TestJdbcWithDBTokenStore.java  |    40 +
 .../hive/minikdc/TestJdbcWithMiniKdc.java       |    12 +-
 .../hive/thrift/TestHadoopAuthBridge23.java     |    63 +-
 .../hive/metastore/TestHiveMetaStore.java       |     1 +
 .../hadoop/hive/thrift/TestDBTokenStore.java    |     3 +-
 .../hive/thrift/TestZooKeeperTokenStore.java    |    12 +-
 .../test/java/org/apache/hive/jdbc/TestSSL.java |     2 +
 .../cli/TestEmbeddedThriftBinaryCLIService.java |     1 +
 .../TestOperationLoggingAPIWithTez.java         |     2 +-
 .../service/cli/session/TestQueryDisplay.java   |   180 +
 itests/qtest/pom.xml                            |     2 +-
 .../test/resources/testconfiguration.properties |     2 +
 .../org/apache/hive/jdbc/HiveConnection.java    |    30 +-
 .../hive/jdbc/HttpTokenAuthInterceptor.java     |    47 +
 jdbc/src/java/org/apache/hive/jdbc/Utils.java   |     4 +
 .../hive/llap/counters/LlapIOCounters.java      |    37 +
 .../llap/IncrementalObjectSizeEstimator.java    |     7 +-
 .../apache/hadoop/hive/llap/cache/Cache.java    |    27 -
 .../hadoop/hive/llap/cache/LowLevelCache.java   |     4 +-
 .../hive/llap/cache/LowLevelCacheImpl.java      |    28 +-
 .../hadoop/hive/llap/cache/NoopCache.java       |    33 -
 .../hive/llap/cli/LlapOptionsProcessor.java     |    46 +-
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |    20 +
 .../hive/llap/counters/FragmentCountersMap.java |    46 +
 .../llap/counters/QueryFragmentCounters.java    |    65 +-
 .../hive/llap/daemon/impl/LlapDaemon.java       |     6 +-
 .../hive/llap/daemon/impl/LlapTaskReporter.java |    14 +-
 .../llap/daemon/impl/TaskRunnerCallable.java    |    14 +-
 .../hive/llap/io/api/impl/LlapInputFormat.java  |    30 +-
 .../hive/llap/io/api/impl/LlapIoImpl.java       |     6 +-
 .../llap/io/decode/EncodedDataConsumer.java     |    77 +-
 .../llap/io/decode/OrcColumnVectorProducer.java |     8 +-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |    11 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |   214 +-
 .../hive/llap/io/metadata/OrcFileMetadata.java  |    33 +-
 .../hive/llap/io/metadata/OrcMetadataCache.java |    12 +-
 .../llap/io/metadata/OrcStripeMetadata.java     |    19 +-
 .../hive/llap/security/SecretManager.java       |    39 +-
 llap-server/src/main/resources/package.py       |    11 +
 llap-server/src/main/resources/templates.py     |     6 +-
 metastore/if/hive_metastore.thrift              |    28 +-
 .../apache/hadoop/hive/metastore/Metastore.java |    22 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 22340 ++++++++++-------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  1409 +-
 .../ThriftHiveMetastore_server.skeleton.cpp     |    40 +
 .../hive/metastore/api/ThriftHiveMetastore.java |  9220 ++++++-
 .../gen-php/metastore/ThriftHiveMetastore.php   |  1756 ++
 .../hive_metastore/ThriftHiveMetastore-remote   |    56 +
 .../hive_metastore/ThriftHiveMetastore.py       |  1639 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   450 +
 .../hive/metastore/AcidEventListener.java       |    94 +
 .../hadoop/hive/metastore/FileFormatProxy.java  |     6 +-
 .../hive/metastore/FileMetadataHandler.java     |     2 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |     2 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   265 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    85 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    23 +-
 .../hive/metastore/MetaStoreDirectSql.java      |     8 +-
 .../hadoop/hive/metastore/ObjectStore.java      |    14 +-
 .../hive/metastore/TSetIpAddressProcessor.java  |     2 +-
 .../TransactionalValidationListener.java        |    11 +
 .../filemeta/OrcFileMetadataHandler.java        |    15 +-
 .../hadoop/hive/metastore/hbase/HBaseUtils.java |    39 +-
 .../metastore/txn/CompactionTxnHandler.java     |     2 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |    20 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   810 +-
 .../hadoop/hive/metastore/txn/TxnStore.java     |    37 +-
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    18 +
 .../hadoop/hive/metastore/model/MOrder.java     |     4 +-
 .../hive/metastore/IpAddressListener.java       |     2 +-
 .../hive/metastore/hbase/TestHBaseStore.java    |    24 +-
 .../hbase/TestHBaseStoreBitVector.java          |    34 +-
 .../hbase/TestSharedStorageDescriptor.java      |    11 +-
 .../metastore/txn/TestTxnHandlerNegative.java   |    10 +-
 .../protobuf-java/org/apache/orc/OrcProto.java  |    16 +
 orc/src/java/org/apache/orc/FileMetadata.java   |     2 +-
 orc/src/java/org/apache/orc/OrcFile.java        |     5 +-
 orc/src/java/org/apache/orc/impl/InStream.java  |     2 +-
 .../java/org/apache/orc/impl/MemoryManager.java |     8 +-
 orc/src/java/org/apache/orc/impl/OutStream.java |     2 +-
 orc/src/protobuf/orc_proto.proto                |     2 +
 .../test/org/apache/orc/impl/TestOutStream.java |    43 +
 ql/pom.xml                                      |    22 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |    93 +-
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |     1 +
 .../org/apache/hadoop/hive/ql/QueryDisplay.java |   133 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |    14 +
 .../hive/ql/exec/AbstractFileMergeOperator.java |    23 +-
 .../hadoop/hive/ql/exec/ColumnStatsTask.java    |    17 +-
 .../hive/ql/exec/ColumnStatsUpdateTask.java     |    12 +-
 .../org/apache/hadoop/hive/ql/exec/DDLTask.java |    89 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |     4 +
 .../hadoop/hive/ql/exec/MapJoinOperator.java    |     2 +
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |     9 +-
 .../hive/ql/exec/OrcFileMergeOperator.java      |    14 +-
 .../hadoop/hive/ql/exec/ReduceSinkOperator.java |     1 +
 .../hadoop/hive/ql/exec/StatsNoJobTask.java     |    41 +-
 .../apache/hadoop/hive/ql/exec/StatsTask.java   |    14 +-
 .../hadoop/hive/ql/exec/TableScanOperator.java  |    10 +
 .../org/apache/hadoop/hive/ql/exec/Task.java    |    87 +-
 .../apache/hadoop/hive/ql/exec/Utilities.java   |     4 +-
 .../hive/ql/exec/errors/TaskLogProcessor.java   |     2 +-
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |     8 +-
 .../hadoop/hive/ql/exec/mr/MapRedTask.java      |     8 +-
 .../persistence/HybridHashTableContainer.java   |    22 +-
 .../persistence/MapJoinBytesTableContainer.java |    36 +-
 .../hive/ql/exec/persistence/MapJoinKey.java    |    12 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |     2 +-
 .../ql/exec/tez/DynamicPartitionPruner.java     |     2 +-
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  |   238 +-
 .../ql/exec/vector/VectorizationContext.java    |     5 +-
 ...AbstractFilterStringColLikeStringScalar.java |   168 +-
 .../FilterStringColLikeStringScalar.java        |    18 +
 .../VectorReduceSinkCommonOperator.java         |    66 +-
 .../hadoop/hive/ql/history/HiveHistory.java     |     2 +-
 .../ql/hooks/UpdateInputAccessTimeHook.java     |    20 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |    47 +-
 .../org/apache/hadoop/hive/ql/io/HdfsUtils.java |    23 +-
 .../hadoop/hive/ql/io/SyntheticFileId.java      |   100 +
 .../hadoop/hive/ql/io/orc/ExternalCache.java    |   338 +
 .../hadoop/hive/ql/io/orc/LocalCache.java       |   112 +
 .../io/orc/MetastoreExternalCachesByConf.java   |    82 +
 .../hive/ql/io/orc/OrcFileFormatProxy.java      |    14 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   |   698 +-
 .../hive/ql/io/orc/OrcNewInputFormat.java       |    16 +-
 .../hadoop/hive/ql/io/orc/OrcOutputFormat.java  |    17 +-
 .../apache/hadoop/hive/ql/io/orc/OrcSplit.java  |    46 +-
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |   125 +-
 .../ql/io/orc/encoded/EncodedReaderImpl.java    |    32 +-
 .../hive/ql/io/orc/encoded/OrcBatchKey.java     |    20 +-
 .../hive/ql/io/orc/encoded/OrcCacheKey.java     |    58 -
 .../hadoop/hive/ql/io/orc/encoded/Reader.java   |    10 +-
 .../hive/ql/io/orc/encoded/ReaderImpl.java      |     4 +-
 .../hive/ql/io/orc/encoded/StreamUtils.java     |     1 -
 .../serde/ArrayWritableObjectInspector.java     |     4 +-
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |     2 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |   111 +-
 .../hive/ql/metadata/PartitionIterable.java     |     2 +-
 .../BucketingSortingReduceSinkOptimizer.java    |    51 +-
 .../hadoop/hive/ql/optimizer/ColumnPruner.java  |     4 +
 .../ql/optimizer/ColumnPrunerProcFactory.java   |    12 +
 .../optimizer/ConstantPropagateProcFactory.java |    32 +-
 .../hive/ql/optimizer/ConvertJoinMapJoin.java   |    85 +-
 .../ql/optimizer/ReduceSinkMapJoinProc.java     |    15 +-
 .../optimizer/SortedDynPartitionOptimizer.java  |    78 +-
 .../ql/optimizer/calcite/RelOptHiveTable.java   |    10 +-
 .../calcite/reloperators/HiveTableScan.java     |    23 +-
 .../calcite/rules/HiveRelFieldTrimmer.java      |    42 +-
 .../calcite/translator/ASTBuilder.java          |     8 +
 .../calcite/translator/ASTConverter.java        |    49 +-
 .../calcite/translator/ExprNodeConverter.java   |    12 +
 .../calcite/translator/HiveOpConverter.java     |    39 +-
 .../correlation/ReduceSinkDeDuplication.java    |    34 +-
 .../ql/optimizer/index/RewriteGBUsingIndex.java |     1 +
 .../RewriteQueryUsingAggregateIndexCtx.java     |     1 +
 .../physical/BucketingSortingOpProcFactory.java |     3 +-
 .../hive/ql/optimizer/physical/LlapDecider.java |    11 +-
 .../physical/MetadataOnlyOptimizer.java         |     3 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    13 +-
 .../spark/SparkReduceSinkMapJoinProc.java       |    23 +-
 .../stats/annotation/StatsRulesProcFactory.java |   184 +-
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |    27 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    75 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |     2 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    48 +-
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |     4 +-
 .../hadoop/hive/ql/parse/IndexUpdater.java      |     1 +
 .../hadoop/hive/ql/parse/PTFInvocationSpec.java |    25 +-
 .../hadoop/hive/ql/parse/PTFTranslator.java     |    26 +-
 .../hadoop/hive/ql/parse/ParseContext.java      |    35 +-
 .../org/apache/hadoop/hive/ql/parse/QB.java     |    30 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   156 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |     3 +-
 .../hive/ql/parse/TypeCheckProcFactory.java     |     2 +
 .../hadoop/hive/ql/parse/WindowingSpec.java     |    56 +-
 .../HiveAuthorizationTaskFactoryImpl.java       |     1 +
 .../hadoop/hive/ql/plan/CreateTableDesc.java    |    26 +
 .../hadoop/hive/ql/plan/PTFDeserializer.java    |     5 +-
 .../apache/hadoop/hive/ql/plan/PlanUtils.java   |    35 +-
 .../hadoop/hive/ql/plan/ReduceSinkDesc.java     |    23 +-
 .../hadoop/hive/ql/plan/TableScanDesc.java      |    16 +
 .../hive/ql/plan/ptf/OrderExpressionDef.java    |    13 +-
 .../hive/ql/plan/ptf/PTFExpressionDef.java      |     3 +-
 .../plan/ptf/PartitionedTableFunctionDef.java   |    12 +-
 .../hive/ql/plan/ptf/ValueBoundaryDef.java      |    16 +-
 .../HiveAuthorizationProviderBase.java          |    16 +-
 .../authorization/plugin/HiveV1Authorizer.java  |    21 +-
 .../hadoop/hive/ql/session/SessionState.java    |    28 +-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |     2 +
 .../hive/ql/txn/compactor/CompactorMR.java      |     2 +-
 .../hadoop/hive/ql/txn/compactor/Initiator.java |     2 +-
 .../org/apache/hadoop/hive/ql/udf/UDFChr.java   |   101 +
 .../apache/hadoop/hive/ql/udf/UDFReplace.java   |    50 +
 .../ql/udf/generic/GenericUDFFormatNumber.java  |    77 +-
 .../hive/ql/udf/ptf/WindowingTableFunction.java |   302 +-
 ql/src/test/data/rc-file-v0.rc                  |   Bin 216 -> 0 bytes
 .../apache/hadoop/hive/ql/TestTxnCommands2.java |    22 +-
 .../ql/exec/errors/TestTaskLogProcessor.java    |     8 +-
 .../TestVectorStringExpressions.java            |   156 +-
 .../apache/hadoop/hive/ql/hooks/TestHooks.java  |     2 +-
 .../apache/hadoop/hive/ql/io/TestRCFile.java    |     3 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   |    61 +-
 .../hadoop/hive/ql/io/orc/TestOrcFile.java      |   113 +
 .../hive/ql/io/orc/TestOrcSplitElimination.java |   405 +-
 .../hadoop/hive/ql/io/orc/TestReaderImpl.java   |   151 +
 .../hive/ql/lockmgr/TestDbTxnManager2.java      |   209 +-
 .../hive/ql/udf/generic/TestGenericUDFChr.java  |   156 +
 .../ql/udf/generic/TestGenericUDFReplace.java   |    56 +
 .../clientnegative/alter_external_acid.q        |     9 +
 .../clientnegative/authorization_view_1.q       |    13 +
 .../clientnegative/authorization_view_2.q       |    17 +
 .../clientnegative/authorization_view_3.q       |    15 +
 .../clientnegative/authorization_view_4.q       |    23 +
 .../clientnegative/authorization_view_5.q       |    16 +
 .../clientnegative/authorization_view_6.q       |    18 +
 .../clientnegative/authorization_view_7.q       |    18 +
 .../authorization_view_disable_cbo_1.q          |    14 +
 .../authorization_view_disable_cbo_2.q          |    17 +
 .../authorization_view_disable_cbo_3.q          |    16 +
 .../authorization_view_disable_cbo_4.q          |    24 +
 .../authorization_view_disable_cbo_5.q          |    17 +
 .../authorization_view_disable_cbo_6.q          |    19 +
 .../authorization_view_disable_cbo_7.q          |    19 +
 .../test/queries/clientnegative/avro_decimal.q  |    17 +
 .../clientnegative/compact_non_acid_table.q     |    11 +
 .../clientnegative/create_external_acid.q       |     6 +
 .../clientnegative/udf_format_number_wrong6.q   |     2 -
 .../test/queries/clientpositive/add_jar_pfile.q |     2 +-
 .../clientpositive/annotate_stats_filter.q      |    12 +-
 .../clientpositive/authorization_view_1.q       |    59 +
 .../clientpositive/authorization_view_2.q       |    16 +
 .../clientpositive/authorization_view_3.q       |    18 +
 .../clientpositive/authorization_view_4.q       |    18 +
 .../authorization_view_disable_cbo_1.q          |    70 +
 .../authorization_view_disable_cbo_2.q          |    17 +
 .../authorization_view_disable_cbo_3.q          |    19 +
 .../authorization_view_disable_cbo_4.q          |    19 +
 .../queries/clientpositive/avro_decimal_old.q   |    14 +
 ql/src/test/queries/clientpositive/constprog2.q |    17 -
 .../clientpositive/constprog_partitioner.q      |    25 -
 .../queries/clientpositive/cross_join_merge.q   |    17 +
 .../queries/clientpositive/dbtxnmgr_compact1.q  |     2 +-
 .../queries/clientpositive/dbtxnmgr_compact2.q  |     2 +-
 .../queries/clientpositive/dbtxnmgr_compact3.q  |     2 +-
 .../clientpositive/encryption_drop_partition.q  |    10 +
 .../clientpositive/encryption_drop_table.q      |     9 +-
 ql/src/test/queries/clientpositive/keyword_3.q  |     8 +
 ql/src/test/queries/clientpositive/orc_create.q |    12 +
 .../clientpositive/orc_merge_incompat3.q        |    14 +
 ql/src/test/queries/clientpositive/order_null.q |    29 +
 .../reduceSinkDeDuplication_pRS_key_empty.q     |    60 +
 .../test/queries/clientpositive/skiphf_aggr.q   |    42 +
 ql/src/test/queries/clientpositive/udf_chr.q    |    25 +
 .../queries/clientpositive/udf_format_number.q  |    36 +-
 .../test/queries/clientpositive/udf_replace.q   |     9 +
 .../test/queries/clientpositive/vector_udf1.q   |   327 +
 .../clientpositive/windowing_order_null.q       |    35 +
 .../clientpositive/windowing_range_multiorder.q |    34 +
 .../resources/orc-file-dump-bloomfilter.out     |     2 +-
 .../resources/orc-file-dump-bloomfilter2.out    |     2 +-
 .../orc-file-dump-dictionary-threshold.out      |     2 +-
 ql/src/test/resources/orc-file-dump.json        |     2 +-
 ql/src/test/resources/orc-file-dump.out         |     2 +-
 ql/src/test/resources/orc-file-has-null.out     |     2 +-
 ql/src/test/resources/rc-file-v0.rc             |   Bin 0 -> 216 bytes
 .../clientnegative/alter_external_acid.q.out    |    13 +
 .../clientnegative/authorization_part.q.out     |     2 +
 .../clientnegative/authorization_view_1.q.out   |    33 +
 .../clientnegative/authorization_view_2.q.out   |    39 +
 .../clientnegative/authorization_view_3.q.out   |    39 +
 .../clientnegative/authorization_view_4.q.out   |    71 +
 .../clientnegative/authorization_view_5.q.out   |    35 +
 .../clientnegative/authorization_view_6.q.out   |    45 +
 .../clientnegative/authorization_view_7.q.out   |    45 +
 .../authorization_view_disable_cbo_1.q.out      |    33 +
 .../authorization_view_disable_cbo_2.q.out      |    39 +
 .../authorization_view_disable_cbo_3.q.out      |    39 +
 .../authorization_view_disable_cbo_4.q.out      |    71 +
 .../authorization_view_disable_cbo_5.q.out      |    35 +
 .../authorization_view_disable_cbo_6.q.out      |    45 +
 .../authorization_view_disable_cbo_7.q.out      |    45 +
 .../results/clientnegative/avro_decimal.q.out   |    22 +
 .../clientnegative/compact_non_acid_table.q.out |    11 +
 .../clientnegative/create_external_acid.q.out   |     5 +
 .../udf_format_number_wrong1.q.out              |     2 +-
 .../udf_format_number_wrong2.q.out              |     2 +-
 .../udf_format_number_wrong4.q.out              |     2 +-
 .../udf_format_number_wrong6.q.out              |     1 -
 .../clientnegative/updateBasicStats.q.out       |     2 +
 .../clientpositive/allcolref_in_udf.q.out       |     1 +
 .../alter_partition_coltype.q.out               |     3 +
 .../alter_rename_partition_authorization.q.out  |     2 +
 .../alter_table_update_status.q.out             |     2 +
 .../clientpositive/annotate_stats_filter.q.out  |   176 +-
 .../annotate_stats_join_pkfk.q.out              |    62 +-
 .../clientpositive/annotate_stats_part.q.out    |    12 +-
 .../clientpositive/annotate_stats_table.q.out   |     1 +
 .../clientpositive/authorization_1.q.out        |     2 +
 .../clientpositive/authorization_2.q.out        |     2 +
 .../clientpositive/authorization_3.q.out        |     2 +
 .../clientpositive/authorization_4.q.out        |     2 +
 .../clientpositive/authorization_6.q.out        |     2 +
 .../authorization_create_temp_table.q.out       |     2 +
 .../clientpositive/authorization_view_1.q.out   |   263 +
 .../clientpositive/authorization_view_2.q.out   |    66 +
 .../clientpositive/authorization_view_3.q.out   |    62 +
 .../clientpositive/authorization_view_4.q.out   |    64 +
 .../authorization_view_disable_cbo_1.q.out      |   311 +
 .../authorization_view_disable_cbo_2.q.out      |    66 +
 .../authorization_view_disable_cbo_3.q.out      |    62 +
 .../authorization_view_disable_cbo_4.q.out      |    64 +
 .../auto_join_reordering_values.q.out           |     8 +
 .../clientpositive/auto_join_stats.q.out        |    58 +-
 .../clientpositive/auto_join_stats2.q.out       |    38 +-
 .../clientpositive/auto_sortmerge_join_1.q.out  |     5 +
 .../clientpositive/auto_sortmerge_join_11.q.out |     4 +
 .../clientpositive/auto_sortmerge_join_12.q.out |     1 +
 .../clientpositive/auto_sortmerge_join_2.q.out  |     4 +
 .../clientpositive/auto_sortmerge_join_3.q.out  |     5 +
 .../clientpositive/auto_sortmerge_join_4.q.out  |     5 +
 .../clientpositive/auto_sortmerge_join_5.q.out  |     5 +
 .../clientpositive/auto_sortmerge_join_7.q.out  |     5 +
 .../clientpositive/auto_sortmerge_join_8.q.out  |     5 +
 .../clientpositive/autogen_colalias.q.out       |    22 +
 .../clientpositive/avro_decimal_old.q.out       |    60 +
 .../test/results/clientpositive/bucket1.q.out   |     1 +
 .../test/results/clientpositive/bucket2.q.out   |     1 +
 .../test/results/clientpositive/bucket3.q.out   |     1 +
 .../test/results/clientpositive/bucket4.q.out   |     1 +
 .../test/results/clientpositive/bucket5.q.out   |     2 +
 .../results/clientpositive/bucket_many.q.out    |     1 +
 .../clientpositive/bucket_map_join_1.q.out      |     1 +
 .../clientpositive/bucket_map_join_2.q.out      |     1 +
 .../clientpositive/bucketcontext_1.q.out        |     2 +
 .../clientpositive/bucketcontext_2.q.out        |     2 +
 .../clientpositive/bucketcontext_3.q.out        |     2 +
 .../clientpositive/bucketcontext_4.q.out        |     2 +
 .../clientpositive/bucketcontext_5.q.out        |     2 +
 .../clientpositive/bucketcontext_6.q.out        |     2 +
 .../clientpositive/bucketcontext_7.q.out        |     2 +
 .../clientpositive/bucketcontext_8.q.out        |     2 +
 .../bucketizedhiveinputformat.q.out             |     3 +-
 .../clientpositive/bucketmapjoin10.q.out        |     1 +
 .../clientpositive/bucketmapjoin11.q.out        |     2 +
 .../clientpositive/bucketmapjoin12.q.out        |     2 +
 .../clientpositive/bucketmapjoin13.q.out        |     4 +
 .../results/clientpositive/bucketmapjoin7.q.out |    19 +-
 .../results/clientpositive/bucketmapjoin8.q.out |     2 +
 .../results/clientpositive/bucketmapjoin9.q.out |     2 +
 .../cbo_SortUnionTransposeRule.q.out            |     2 +
 .../test/results/clientpositive/cbo_const.q.out |    26 +-
 .../cbo_rp_cross_product_check_2.q.out          |     4 +
 .../cbo_rp_outer_join_ppr.q.java1.7.out         |     4 +
 .../clientpositive/cbo_rp_unionDistinct_2.q.out |     6 +
 .../clientpositive/char_nested_types.q.out      |     2 +
 .../test/results/clientpositive/cluster.q.out   |   180 +-
 .../clientpositive/colstats_all_nulls.q.out     |     3 +
 .../clientpositive/column_access_stats.q.out    |    28 +-
 .../clientpositive/columnstats_partlvl.q.out    |     2 +
 .../clientpositive/columnstats_tbllvl.q.out     |     2 +
 .../test/results/clientpositive/combine2.q.out  |     1 +
 .../constantPropagateForSubQuery.q.out          |     2 +
 .../clientpositive/correlationoptimizer12.q.out |     4 +-
 .../clientpositive/create_default_prop.q.out    |     1 +
 .../results/clientpositive/create_view.q.out    |     2 +-
 .../clientpositive/cross_join_merge.q.out       |   490 +
 .../clientpositive/cross_product_check_1.q.out  |     4 +
 .../clientpositive/cross_product_check_2.q.out  |     4 +
 ql/src/test/results/clientpositive/ctas.q.out   |    24 +-
 .../test/results/clientpositive/ctas_char.q.out |     2 +
 .../results/clientpositive/ctas_colname.q.out   |    25 +-
 .../test/results/clientpositive/ctas_date.q.out |     4 +
 .../ctas_uses_database_location.q.out           |     2 +
 .../results/clientpositive/ctas_varchar.q.out   |     2 +
 ql/src/test/results/clientpositive/cte_2.q.out  |     1 +
 ql/src/test/results/clientpositive/cte_4.q.out  |     1 +
 .../test/results/clientpositive/database.q.out  |     2 +
 .../clientpositive/dbtxnmgr_compact1.q.out      |     4 +-
 .../clientpositive/dbtxnmgr_compact2.q.out      |     4 +-
 .../clientpositive/dbtxnmgr_compact3.q.out      |     4 +-
 .../results/clientpositive/dbtxnmgr_ddl1.q.out  |     2 +
 .../test/results/clientpositive/decimal_6.q.out |     2 +
 .../results/clientpositive/decimal_join2.q.out  |     2 +
 .../results/clientpositive/decimal_serde.q.out  |     6 +
 .../disable_merge_for_bucketing.q.out           |     1 +
 .../display_colstats_tbllvl.q.out               |     1 +
 .../dynpart_sort_opt_vectorization.q.out        |     4 +-
 .../results/clientpositive/empty_join.q.out     |     2 +
 .../encrypted/encryption_drop_partition.q.out   |    76 +
 .../encrypted/encryption_drop_table.q.out       |    27 +
 .../encryption_join_unencrypted_tbl.q.out       |     2 +
 ...on_join_with_different_encryption_keys.q.out |     2 +
 .../results/clientpositive/explain_ddl.q.out    |     2 +
 .../clientpositive/explain_logical.q.out        |     9 +-
 .../clientpositive/filter_join_breaktask.q.out  |     4 +
 .../clientpositive/fouter_join_ppr.q.out        |     8 +
 .../results/clientpositive/global_limit.q.out   |     1 +
 .../clientpositive/groupby_duplicate_key.q.out  |     3 +
 .../clientpositive/groupby_grouping_sets6.q.out |    44 +-
 .../groupby_grouping_window.q.out               |     2 +-
 .../clientpositive/groupby_map_ppr.q.out        |     1 +
 .../groupby_map_ppr_multi_distinct.q.out        |     1 +
 .../results/clientpositive/groupby_ppr.q.out    |     1 +
 .../groupby_ppr_multi_distinct.q.out            |     1 +
 .../clientpositive/groupby_resolution.q.out     |     2 +-
 .../clientpositive/groupby_sort_1_23.q.out      |    51 +-
 .../results/clientpositive/groupby_sort_6.q.out |     3 +
 .../clientpositive/groupby_sort_skew_1_23.q.out |    58 +-
 .../clientpositive/index_auto_empty.q.out       |     2 +-
 .../clientpositive/index_auto_file_format.q.out |     4 +-
 .../clientpositive/index_auto_multiple.q.out    |     2 +-
 .../clientpositive/index_auto_partitioned.q.out |     2 +-
 .../clientpositive/index_auto_update.q.out      |     2 +-
 .../index_bitmap_auto_partitioned.q.out         |     2 +-
 .../results/clientpositive/index_stale.q.out    |     2 +-
 .../index_stale_partitioned.q.out               |     2 +-
 .../clientpositive/infer_const_type.q.out       |    20 +-
 .../test/results/clientpositive/input23.q.out   |     2 +
 .../test/results/clientpositive/input46.q.out   |     4 +
 .../results/clientpositive/input_part4.q.out    |     2 +-
 .../results/clientpositive/input_part6.q.out    |     2 +-
 .../results/clientpositive/input_part7.q.out    |    38 +-
 .../test/results/clientpositive/insert0.q.out   |     2 +
 ql/src/test/results/clientpositive/join17.q.out |     2 +
 ql/src/test/results/clientpositive/join35.q.out |     2 +
 ql/src/test/results/clientpositive/join38.q.out |     8 +-
 ql/src/test/results/clientpositive/join41.q.out |     2 +
 ql/src/test/results/clientpositive/join42.q.out |    53 +-
 ql/src/test/results/clientpositive/join9.q.out  |     2 +
 .../clientpositive/join_filters_overlap.q.out   |    19 +
 .../test/results/clientpositive/keyword_3.q.out |    32 +
 .../clientpositive/lateral_view_outer.q.out     |     2 +
 .../test/results/clientpositive/lineage2.q.out  |     2 +-
 .../test/results/clientpositive/lineage3.q.out  |    10 +-
 .../list_bucket_query_multiskew_3.q.out         |     1 +
 .../list_bucket_query_oneskew_2.q.out           |     2 +
 .../llap/bucket_map_join_tez1.q.out             |   193 +-
 .../llap/bucket_map_join_tez2.q.out             |     4 +-
 .../results/clientpositive/llap/cte_2.q.out     |     1 +
 .../results/clientpositive/llap/cte_4.q.out     |     1 +
 .../llap/dynamic_partition_pruning.q.out        |   276 +-
 .../llap/dynamic_partition_pruning_2.q.out      |     8 +-
 .../llap/hybridgrace_hashjoin_1.q.out           |    24 +-
 .../llap/hybridgrace_hashjoin_2.q.out           |    24 +-
 .../clientpositive/llap/llap_nullscan.q.out     |     8 +-
 .../results/clientpositive/llap/llap_udf.q.out  |     2 +
 .../clientpositive/llap/llapdecider.q.out       |     4 +-
 .../clientpositive/llap/mapjoin_decimal.q.out   |     2 +-
 .../test/results/clientpositive/llap/mrr.q.out  |    10 +-
 .../llap/tez_bmj_schema_evolution.q.out         |     2 +-
 .../results/clientpositive/llap/tez_dml.q.out   |     6 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |    12 +-
 .../llap/tez_dynpart_hashjoin_2.q.out           |     6 +-
 .../clientpositive/llap/tez_join_hash.q.out     |     2 +-
 .../llap/tez_join_result_complex.q.out          |    34 +
 .../clientpositive/llap/tez_join_tests.q.out    |     2 +-
 .../clientpositive/llap/tez_joins_explain.q.out |     2 +-
 .../results/clientpositive/llap/tez_smb_1.q.out |     8 +-
 .../clientpositive/llap/tez_smb_main.q.out      |    20 +-
 .../results/clientpositive/llap/tez_union.q.out |    15 +-
 .../clientpositive/llap/tez_union2.q.out        |     4 +-
 .../llap/tez_union_multiinsert.q.out            |    28 +-
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |    12 +-
 .../llap/tez_vector_dynpart_hashjoin_2.q.out    |     6 +-
 .../vectorized_dynamic_partition_pruning.q.out  |   270 +-
 .../clientpositive/llap_partitioned.q.out       |     4 +
 .../clientpositive/llap_uncompressed.q.out      |    12 +
 .../clientpositive/louter_join_ppr.q.out        |     8 +
 ql/src/test/results/clientpositive/merge3.q.out |     5 +
 .../results/clientpositive/metadataonly1.q.out  |    12 +
 .../results/clientpositive/multi_insert.q.out   |    24 +-
 .../multi_insert_lateral_view.q.out             |     2 +
 ...i_insert_move_tasks_share_dependencies.q.out |    24 +-
 .../clientpositive/multi_insert_union_src.q.out |     2 +
 .../clientpositive/multi_join_union.q.out       |     8 +
 .../results/clientpositive/nestedvirtual.q.out  |     9 +
 .../clientpositive/non_ascii_literal2.q.out     |     2 +
 .../results/clientpositive/nullformatCTAS.q.out |     2 +
 .../clientpositive/optimize_nullscan.q.out      |    14 +
 .../results/clientpositive/orc_create.q.out     |    52 +
 .../results/clientpositive/orc_createas1.q.out  |     5 +
 .../results/clientpositive/orc_file_dump.q.out  |     6 +-
 .../test/results/clientpositive/orc_llap.q.out  |    42 +
 .../results/clientpositive/orc_merge10.q.out    |     4 +-
 .../results/clientpositive/orc_merge11.q.out    |     6 +-
 .../clientpositive/orc_merge_incompat3.q.out    |    70 +
 .../results/clientpositive/order_null.q.out     |   222 +
 .../clientpositive/outer_join_ppr.q.java1.7.out |     4 +
 .../clientpositive/parallel_orderby.q.out       |     4 +
 .../results/clientpositive/parquet_ctas.q.out   |     9 +
 .../results/clientpositive/parquet_join.q.out   |     5 +
 .../parquet_map_null.q.java1.7.out              |     1 +
 .../parquet_map_of_arrays_of_ints.q.out         |     1 +
 .../clientpositive/parquet_map_of_maps.q.out    |     1 +
 .../parquet_mixed_partition_formats2.q.out      |     4 +
 .../clientpositive/parquet_nested_complex.q.out |     5 +
 .../parquet_schema_evolution.q.out              |     2 +
 ...arquet_write_correct_definition_levels.q.out |     1 +
 .../clientpositive/partition_decode_name.q.out  |     2 +
 .../clientpositive/partition_special_char.q.out |     2 +
 ql/src/test/results/clientpositive/pcr.q.out    |   220 +-
 ql/src/test/results/clientpositive/pcs.q.out    |    49 +-
 .../results/clientpositive/perf/query12.q.out   |     2 +-
 .../results/clientpositive/perf/query20.q.out   |     2 +-
 .../results/clientpositive/perf/query28.q.out   |   192 +-
 .../results/clientpositive/perf/query51.q.out   |     6 +-
 .../results/clientpositive/perf/query65.q.out   |   116 +-
 .../results/clientpositive/perf/query67.q.out   |     2 +-
 .../results/clientpositive/perf/query70.q.out   |     4 +-
 .../results/clientpositive/perf/query88.q.out   |   912 +-
 .../results/clientpositive/perf/query89.q.out   |     2 +-
 .../results/clientpositive/perf/query98.q.out   |     2 +-
 .../results/clientpositive/pointlookup2.q.out   |   100 +-
 .../results/clientpositive/pointlookup3.q.out   |    96 +-
 .../results/clientpositive/pointlookup4.q.out   |    42 +-
 ql/src/test/results/clientpositive/ppd2.q.out   |    29 +-
 .../results/clientpositive/ppd_clusterby.q.out  |    91 +-
 .../clientpositive/ppd_join_filter.q.out        |    12 +
 .../clientpositive/ppd_outer_join5.q.out        |   127 +-
 .../results/clientpositive/ppd_udf_col.q.out    |    14 +-
 .../results/clientpositive/ppd_union_view.q.out |     4 +
 ql/src/test/results/clientpositive/ppd_vc.q.out |    18 +-
 .../results/clientpositive/ppd_windowing1.q.out |    66 +-
 ql/src/test/results/clientpositive/ptf.q.out    |   144 +-
 .../results/clientpositive/ptf_matchpath.q.out  |    34 +-
 .../results/clientpositive/ptf_streaming.q.out  |    96 +-
 .../results/clientpositive/ptfgroupbyjoin.q.out |     2 +-
 .../test/results/clientpositive/push_or.q.out   |    11 +-
 .../query_result_fileformat.q.out               |     2 +
 .../results/clientpositive/quotedid_basic.q.out |     4 +-
 .../clientpositive/rcfile_createas1.q.out       |     3 +
 .../clientpositive/rcfile_default_format.q.out  |     8 +
 .../reduceSinkDeDuplication_pRS_key_empty.q.out |   220 +
 .../clientpositive/reduce_deduplicate.q.out     |     2 +
 .../test/results/clientpositive/regex_col.q.out |    16 +-
 .../results/clientpositive/regexp_extract.q.out |     2 +
 .../clientpositive/router_join_ppr.q.out        |     8 +
 .../test/results/clientpositive/sample10.q.out  |     7 +-
 .../test/results/clientpositive/sample6.q.out   |    77 +-
 .../test/results/clientpositive/sample8.q.out   |     2 +
 .../sample_islocalmode_hook.q.out               |     4 +
 .../clientpositive/select_same_col.q.out        |     2 +
 .../test/results/clientpositive/semijoin.q.out  |     6 +
 .../test/results/clientpositive/semijoin2.q.out |     4 +-
 .../test/results/clientpositive/semijoin3.q.out |     4 +
 .../test/results/clientpositive/semijoin4.q.out |     2 +-
 .../set_processor_namespaces.q.out              |     2 +-
 .../results/clientpositive/show_functions.q.out |     4 +
 .../clientpositive/skewjoin_noskew.q.out        |     2 +
 .../clientpositive/skewjoin_onesideskew.q.out   |     2 +
 .../results/clientpositive/skiphf_aggr.q.out    |   267 +
 .../results/clientpositive/smb_mapjoin9.q.out   |     4 +
 .../results/clientpositive/smb_mapjoin_11.q.out |     1 +
 .../results/clientpositive/smb_mapjoin_13.q.out |    20 +-
 .../results/clientpositive/smb_mapjoin_15.q.out |    40 +-
 .../clientpositive/sort_merge_join_desc_5.q.out |     1 +
 .../clientpositive/sort_merge_join_desc_6.q.out |     1 +
 .../clientpositive/sort_merge_join_desc_7.q.out |     1 +
 .../spark/auto_join_reordering_values.q.out     |     8 +
 .../clientpositive/spark/auto_join_stats.q.out  |    31 +-
 .../clientpositive/spark/auto_join_stats2.q.out |    31 +-
 .../spark/auto_sortmerge_join_1.q.out           |     3 +
 .../spark/auto_sortmerge_join_12.q.out          |     1 +
 .../spark/auto_sortmerge_join_2.q.out           |     2 +
 .../spark/auto_sortmerge_join_3.q.out           |     3 +
 .../spark/auto_sortmerge_join_4.q.out           |     3 +
 .../spark/auto_sortmerge_join_5.q.out           |     3 +
 .../spark/auto_sortmerge_join_7.q.out           |     3 +
 .../spark/auto_sortmerge_join_8.q.out           |     3 +
 .../results/clientpositive/spark/bucket2.q.out  |     1 +
 .../results/clientpositive/spark/bucket3.q.out  |     1 +
 .../results/clientpositive/spark/bucket4.q.out  |     1 +
 .../results/clientpositive/spark/bucket5.q.out  |     2 +
 .../spark/bucket_map_join_1.q.out               |     1 +
 .../spark/bucket_map_join_2.q.out               |     1 +
 .../spark/bucketizedhiveinputformat.q.out       |     3 +-
 .../clientpositive/spark/bucketmapjoin10.q.out  |     1 +
 .../clientpositive/spark/bucketmapjoin11.q.out  |     2 +
 .../clientpositive/spark/bucketmapjoin12.q.out  |     2 +
 .../clientpositive/spark/bucketmapjoin13.q.out  |     4 +
 .../clientpositive/spark/bucketmapjoin7.q.out   |    19 +-
 .../clientpositive/spark/bucketmapjoin8.q.out   |     2 +
 .../clientpositive/spark/bucketmapjoin9.q.out   |     2 +
 .../spark/column_access_stats.q.out             |    28 +-
 .../spark/cross_product_check_1.q.out           |     4 +
 .../spark/cross_product_check_2.q.out           |     4 +
 .../results/clientpositive/spark/ctas.q.out     |    24 +-
 .../spark/disable_merge_for_bucketing.q.out     |     1 +
 .../spark/filter_join_breaktask.q.out           |     4 +
 .../clientpositive/spark/groupby_map_ppr.q.out  |     1 +
 .../spark/groupby_map_ppr_multi_distinct.q.out  |     1 +
 .../clientpositive/spark/groupby_ppr.q.out      |     1 +
 .../spark/groupby_ppr_multi_distinct.q.out      |     1 +
 .../spark/groupby_resolution.q.out              |     2 +-
 .../spark/groupby_sort_1_23.q.out               |    51 +-
 .../spark/groupby_sort_skew_1_23.q.out          |    58 +-
 .../results/clientpositive/spark/join17.q.out   |     2 +
 .../results/clientpositive/spark/join34.q.out   |     3 +
 .../results/clientpositive/spark/join35.q.out   |     5 +
 .../results/clientpositive/spark/join38.q.out   |     8 +-
 .../results/clientpositive/spark/join41.q.out   |     2 +
 .../results/clientpositive/spark/join9.q.out    |     2 +
 .../spark/join_filters_overlap.q.out            |    19 +
 .../clientpositive/spark/louter_join_ppr.q.out  |     8 +
 .../clientpositive/spark/multi_insert.q.out     |    24 +-
 .../spark/multi_insert_lateral_view.q.out       |     2 +
 ...i_insert_move_tasks_share_dependencies.q.out |    24 +-
 .../clientpositive/spark/multi_join_union.q.out |     8 +
 .../spark/optimize_nullscan.q.out               |    14 +
 .../spark/outer_join_ppr.q.java1.7.out          |     4 +
 .../clientpositive/spark/parallel_orderby.q.out |     4 +
 .../clientpositive/spark/parquet_join.q.out     |     5 +
 .../test/results/clientpositive/spark/pcr.q.out |   220 +-
 .../clientpositive/spark/ppd_join_filter.q.out  |    12 +
 .../clientpositive/spark/ppd_outer_join5.q.out  |    97 +-
 .../test/results/clientpositive/spark/ptf.q.out |   146 +-
 .../clientpositive/spark/ptf_matchpath.q.out    |    34 +-
 .../clientpositive/spark/ptf_streaming.q.out    |    96 +-
 .../spark/reduce_deduplicate.q.out              |     2 +
 .../clientpositive/spark/router_join_ppr.q.out  |     8 +
 .../results/clientpositive/spark/sample10.q.out |     7 +-
 .../results/clientpositive/spark/sample6.q.out  |    77 +-
 .../results/clientpositive/spark/sample8.q.out  |     2 +
 .../results/clientpositive/spark/semijoin.q.out |     6 +
 .../clientpositive/spark/skewjoin_noskew.q.out  |     2 +
 .../clientpositive/spark/smb_mapjoin_11.q.out   |     1 +
 .../clientpositive/spark/smb_mapjoin_12.q.out   |     2 +
 .../clientpositive/spark/smb_mapjoin_13.q.out   |    20 +-
 .../clientpositive/spark/smb_mapjoin_15.q.out   |    40 +-
 .../results/clientpositive/spark/stats5.q.out   |     2 +
 .../clientpositive/spark/subquery_in.q.out      |     4 +-
 .../clientpositive/spark/temp_table_join1.q.out |     2 +
 .../clientpositive/spark/transform_ppr1.q.out   |     1 +
 .../clientpositive/spark/transform_ppr2.q.out   |     1 +
 .../results/clientpositive/spark/union24.q.out  |    14 +
 .../results/clientpositive/spark/union27.q.out  |    52 +-
 .../results/clientpositive/spark/union31.q.out  |     8 +
 .../results/clientpositive/spark/union32.q.out  |     4 +
 .../results/clientpositive/spark/union33.q.out  |     4 +-
 .../clientpositive/spark/union_ppr.q.out        |    38 +-
 .../clientpositive/spark/union_remove_19.q.out  |    20 +-
 .../spark/union_remove_6_subq.q.out             |     2 +-
 .../clientpositive/spark/union_top_level.q.out  |     2 +
 .../spark/vector_between_in.q.out               |     4 +
 .../spark/vector_decimal_aggregate.q.out        |     4 +
 .../spark/vector_outer_join1.q.out              |   100 +-
 .../spark/vector_outer_join2.q.out              |    60 +
 .../spark/vector_outer_join3.q.out              |    60 +
 .../spark/vector_outer_join4.q.out              |   100 +-
 .../spark/vector_outer_join5.q.out              |     8 +
 .../spark/vectorization_decimal_date.q.out      |     4 +
 .../spark/vectorization_short_regress.q.out     |    12 +
 .../clientpositive/spark/vectorized_ptf.q.out   |   747 +-
 .../special_character_in_tabnames_1.q.out       |     2 +
 ql/src/test/results/clientpositive/stats5.q.out |     2 +
 .../clientpositive/str_to_map.q.java1.7.out     |     1 +
 .../results/clientpositive/subquery_in.q.out    |     4 +-
 .../clientpositive/subquery_in_having.q.out     |     2 +-
 .../results/clientpositive/subquery_notin.q.out |    12 +-
 .../subquery_unqualcolumnrefs.q.out             |     8 +-
 .../results/clientpositive/subquery_views.q.out |     8 +-
 .../results/clientpositive/temp_table.q.out     |     2 +
 .../temp_table_display_colstats_tbllvl.q.out    |     1 +
 .../clientpositive/temp_table_join1.q.out       |     2 +
 .../tez/auto_sortmerge_join_1.q.out             |     6 +
 .../tez/auto_sortmerge_join_10.q.out            |    42 +-
 .../tez/auto_sortmerge_join_11.q.out            |     9 +
 .../tez/auto_sortmerge_join_12.q.out            |     4 +
 .../tez/auto_sortmerge_join_2.q.out             |     4 +
 .../tez/auto_sortmerge_join_3.q.out             |     6 +
 .../tez/auto_sortmerge_join_4.q.out             |     6 +
 .../tez/auto_sortmerge_join_5.q.out             |     4 +
 .../tez/auto_sortmerge_join_7.q.out             |     6 +
 .../tez/auto_sortmerge_join_8.q.out             |     6 +
 .../results/clientpositive/tez/bucket2.q.out    |     1 +
 .../results/clientpositive/tez/bucket3.q.out    |     1 +
 .../results/clientpositive/tez/bucket4.q.out    |     1 +
 .../tez/bucket_map_join_tez1.q.out              |   177 +-
 .../tez/cross_product_check_1.q.out             |     4 +
 .../tez/cross_product_check_2.q.out             |   175 +-
 .../test/results/clientpositive/tez/ctas.q.out  |    24 +-
 .../test/results/clientpositive/tez/cte_2.q.out |     1 +
 .../test/results/clientpositive/tez/cte_4.q.out |     1 +
 .../tez/disable_merge_for_bucketing.q.out       |     1 +
 .../tez/dynamic_partition_pruning.q.out         |   174 +-
 .../tez/dynpart_sort_opt_vectorization.q.out    |     8 +-
 .../tez/dynpart_sort_optimization.q.out         |     4 +-
 .../results/clientpositive/tez/empty_join.q.out |     2 +
 .../clientpositive/tez/explainuser_1.q.out      |   132 +-
 .../clientpositive/tez/explainuser_2.q.out      |    36 +-
 .../clientpositive/tez/explainuser_3.q.out      |     2 +
 .../tez/filter_join_breaktask.q.out             |     4 +
 .../tez/hybridgrace_hashjoin_1.q.out            |     4 +
 .../clientpositive/tez/llap_nullscan.q.out      |     4 +
 .../clientpositive/tez/llapdecider.q.out        |     4 +-
 .../clientpositive/tez/mapjoin_mapjoin.q.out    |     2 +
 .../clientpositive/tez/metadataonly1.q.out      |    12 +
 .../clientpositive/tez/optimize_nullscan.q.out  |    14 +
 .../clientpositive/tez/orc_merge10.q.out        |     4 +-
 .../clientpositive/tez/orc_merge11.q.out        |     6 +-
 .../tez/orc_merge_incompat3.q.out               |    70 +
 .../results/clientpositive/tez/order_null.q.out |   222 +
 .../test/results/clientpositive/tez/ptf.q.out   |   144 +-
 .../clientpositive/tez/ptf_matchpath.q.out      |    34 +-
 .../clientpositive/tez/ptf_streaming.q.out      |    96 +-
 .../clientpositive/tez/subquery_in.q.out        |     4 +-
 .../results/clientpositive/tez/temp_table.q.out |     2 +
 .../results/clientpositive/tez/tez_dml.q.out    |     4 +
 .../tez/tez_join_result_complex.q.out           |    34 +
 .../results/clientpositive/tez/tez_union.q.out  |    11 +
 .../clientpositive/tez/transform_ppr1.q.out     |     1 +
 .../clientpositive/tez/transform_ppr2.q.out     |     1 +
 .../clientpositive/tez/unionDistinct_1.q.out    |   103 +-
 .../clientpositive/tez/unionDistinct_2.q.out    |     6 +
 .../clientpositive/tez/union_fast_stats.q.out   |   130 +-
 .../tez/vector_between_columns.q.out            |     4 +
 .../clientpositive/tez/vector_between_in.q.out  |     4 +
 .../tez/vector_char_mapjoin1.q.out              |     6 +
 .../tez/vector_decimal_10_0.q.out               |     1 +
 .../clientpositive/tez/vector_decimal_3.q.out   |     2 +
 .../clientpositive/tez/vector_decimal_6.q.out   |     2 +
 .../tez/vector_decimal_aggregate.q.out          |     4 +
 .../tez/vector_decimal_expressions.q.out        |     3 +
 .../tez/vector_decimal_math_funcs.q.out         |     4 +
 .../tez/vector_groupby_mapjoin.q.out            |    53 +-
 .../tez/vector_grouping_sets.q.out              |    29 +
 .../clientpositive/tez/vector_inner_join.q.out  |   166 +-
 .../tez/vector_interval_mapjoin.q.out           |    26 +
 .../clientpositive/tez/vector_join30.q.out      |     2 +
 .../tez/vector_join_filters.q.out               |     2 +
 .../clientpositive/tez/vector_join_nulls.q.out  |     2 +
 .../tez/vector_leftsemi_mapjoin.q.out           |     6 +
 .../tez/vector_mapjoin_reduce.q.out             |   118 +-
 .../tez/vector_multi_insert.q.out               |     1 +
 .../tez/vector_nullsafe_join.q.out              |     2 +
 .../clientpositive/tez/vector_outer_join1.q.out |   108 +-
 .../clientpositive/tez/vector_outer_join2.q.out |    60 +
 .../clientpositive/tez/vector_outer_join3.q.out |    60 +
 .../clientpositive/tez/vector_outer_join4.q.out |   108 +-
 .../clientpositive/tez/vector_outer_join5.q.out |     8 +
 .../clientpositive/tez/vector_outer_join6.q.out |    12 +
 .../tez/vector_partitioned_date_time.q.out      |     6 +
 .../tez/vector_reduce_groupby_decimal.q.out     |     4 +
 .../tez/vector_varchar_mapjoin1.q.out           |     6 +
 .../tez/vectorization_decimal_date.q.out        |     4 +
 .../tez/vectorization_short_regress.q.out       |    12 +
 .../vectorized_dynamic_partition_pruning.q.out  |   176 +-
 .../clientpositive/tez/vectorized_ptf.q.out     |   745 +-
 .../clientpositive/tez/windowing_gby.q.out      |     2 +-
 .../results/clientpositive/transform_ppr1.q.out |     1 +
 .../results/clientpositive/transform_ppr2.q.out |     1 +
 .../test/results/clientpositive/udf_chr.q.out   |   Bin 0 -> 1476 bytes
 .../results/clientpositive/udf_explode.q.out    |     2 +
 .../clientpositive/udf_format_number.q.out      |   101 +-
 .../results/clientpositive/udf_replace.q.out    |    32 +
 .../clientpositive/udf_unix_timestamp.q.out     |     2 +
 .../results/clientpositive/udtf_explode.q.out   |     4 +
 .../test/results/clientpositive/union22.q.out   |     2 +
 .../test/results/clientpositive/union24.q.out   |    14 +
 .../test/results/clientpositive/union27.q.out   |    52 +-
 .../test/results/clientpositive/union31.q.out   |     8 +
 .../test/results/clientpositive/union32.q.out   |     4 +
 .../test/results/clientpositive/union33.q.out   |     4 +-
 .../clientpositive/unionDistinct_1.q.out        |   100 +-
 .../clientpositive/unionDistinct_2.q.out        |     6 +
 .../clientpositive/union_fast_stats.q.out       |   136 +-
 .../test/results/clientpositive/union_ppr.q.out |    38 +-
 .../clientpositive/union_remove_19.q.out        |    32 +-
 .../clientpositive/union_remove_6_subq.q.out    |     2 +-
 .../clientpositive/union_top_level.q.out        |     2 +
 .../clientpositive/updateAccessTime.q.out       |     2 +
 .../clientpositive/updateBasicStats.q.out       |     2 +
 .../clientpositive/varchar_nested_types.q.out   |     2 +
 .../clientpositive/vector_between_columns.q.out |     4 +
 .../clientpositive/vector_between_in.q.out      |     4 +
 .../clientpositive/vector_char_mapjoin1.q.out   |     6 +
 .../clientpositive/vector_decimal_10_0.q.out    |     1 +
 .../clientpositive/vector_decimal_3.q.out       |     2 +
 .../clientpositive/vector_decimal_6.q.out       |     2 +
 .../vector_decimal_aggregate.q.out              |     4 +
 .../vector_decimal_expressions.q.out            |     3 +
 .../vector_decimal_math_funcs.q.out             |     4 +
 .../clientpositive/vector_grouping_sets.q.out   |    29 +
 .../vector_interval_mapjoin.q.out               |    26 +
 .../results/clientpositive/vector_join30.q.out  |     2 +
 .../clientpositive/vector_join_filters.q.out    |     2 +
 .../clientpositive/vector_join_nulls.q.out      |     2 +
 .../vector_leftsemi_mapjoin.q.out               |     6 +
 .../clientpositive/vector_multi_insert.q.out    |     1 +
 .../clientpositive/vector_nullsafe_join.q.out   |     2 +
 .../clientpositive/vector_outer_join1.q.out     |    60 +
 .../clientpositive/vector_outer_join2.q.out     |    60 +
 .../clientpositive/vector_outer_join3.q.out     |    60 +
 .../clientpositive/vector_outer_join4.q.out     |    60 +
 .../clientpositive/vector_outer_join5.q.out     |     8 +
 .../clientpositive/vector_outer_join6.q.out     |    12 +
 .../vector_partitioned_date_time.q.out          |     6 +
 .../vector_reduce_groupby_decimal.q.out         |     4 +
 .../results/clientpositive/vector_udf1.q.out    |  1640 ++
 .../vector_varchar_mapjoin1.q.out               |     6 +
 .../vectorization_decimal_date.q.out            |     4 +
 .../vectorization_short_regress.q.out           |    12 +
 .../results/clientpositive/vectorized_ptf.q.out |   745 +-
 .../clientpositive/windowing_navfn.q.out        |     2 +
 .../clientpositive/windowing_order_null.q.out   |   183 +
 .../windowing_range_multiorder.q.out            |   910 +
 .../clientpositive/windowing_streaming.q.out    |    12 +-
 serde/if/serde.thrift                           |     1 +
 serde/pom.xml                                   |    22 +
 .../src/gen/thrift/gen-cpp/serde_constants.cpp  |     2 +
 serde/src/gen/thrift/gen-cpp/serde_constants.h  |     1 +
 .../hadoop/hive/serde/serdeConstants.java       |     2 +
 .../org/apache/hadoop/hive/serde/Types.php      |     5 +
 .../org_apache_hadoop_hive_serde/constants.py   |     1 +
 serde/src/gen/thrift/gen-rb/serde_constants.rb  |     2 +
 .../hive/serde2/avro/AvroDeserializer.java      |     2 +-
 .../binarysortable/BinarySortableSerDe.java     |   110 +-
 .../BinarySortableSerDeWithEndPrefix.java       |     2 +-
 .../fast/BinarySortableSerializeWrite.java      |   130 +-
 .../binarysortable/TestBinarySortableFast.java  |    30 +-
 .../binarysortable/TestBinarySortableSerDe.java |    18 +-
 service-rpc/if/TCLIService.thrift               |     9 +
 .../gen/thrift/gen-cpp/TCLIService_types.cpp    |    66 +
 .../src/gen/thrift/gen-cpp/TCLIService_types.h  |    28 +-
 .../rpc/thrift/TGetOperationStatusResp.java     |   312 +-
 service-rpc/src/gen/thrift/gen-php/Types.php    |    69 +
 .../gen-py/TCLIService/TCLIService-remote       |     0
 .../src/gen/thrift/gen-py/TCLIService/ttypes.py |    41 +-
 .../gen/thrift/gen-rb/t_c_l_i_service_types.rb  |     8 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |    18 +-
 .../auth/AuthenticationProviderFactory.java     |    12 +-
 .../auth/CustomAuthenticationProviderImpl.java  |    13 +-
 .../hive/service/auth/HiveAuthFactory.java      |    81 +-
 .../auth/LdapAuthenticationProviderImpl.java    |     3 +-
 .../auth/PamAuthenticationProviderImpl.java     |     3 +-
 .../org/apache/hive/service/cli/CLIService.java |     4 +-
 .../hive/service/cli/OperationStatus.java       |    20 +-
 .../hive/service/cli/operation/Operation.java   |    41 +-
 .../service/cli/operation/SQLOperation.java     |    49 +-
 .../service/cli/session/HiveSessionImpl.java    |    21 +-
 .../cli/session/HiveSessionImplwithUGI.java     |     2 +-
 .../thrift/RetryingThriftCLIServiceClient.java  |    26 +-
 .../service/cli/thrift/ThriftCLIService.java    |     3 +
 .../cli/thrift/ThriftCLIServiceClient.java      |     3 +-
 .../cli/thrift/ThriftHttpCLIService.java        |     2 +-
 .../service/cli/thrift/ThriftHttpServlet.java   |    26 +-
 .../apache/hive/service/server/HiveServer2.java |     2 +-
 .../auth/TestLdapAtnProviderWithMiniDS.java     |     2 +-
 .../TestLdapAuthenticationProviderImpl.java     |     2 +-
 .../apache/hive/service/cli/CLIServiceTest.java |   104 +-
 .../cli/TestRetryingThriftCLIServiceClient.java |    15 +-
 .../apache/hadoop/hive/shims/Hadoop23Shims.java |     2 +-
 .../apache/hadoop/hive/thrift/DBTokenStore.java |    49 +-
 .../thrift/DelegationTokenSecretManager.java    |    25 +
 .../hive/thrift/HadoopThriftAuthBridge.java     |   150 +-
 .../hive/thrift/HiveDelegationTokenManager.java |   172 +
 .../TokenStoreDelegationTokenSecretManager.java |    10 -
 .../hadoop/hive/thrift/ZooKeeperTokenStore.java |    16 +-
 spark-client/pom.xml                            |    22 +
 .../apache/hadoop/hive/common/io/DataCache.java |     4 +-
 .../common/io/encoded/EncodedColumnBatch.java   |     9 +-
 .../ql/exec/vector/DecimalColumnVector.java     |    11 +-
 .../hadoop/hive/ql/io/sarg/PredicateLeaf.java   |     1 -
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |     1 -
 875 files changed, 49806 insertions(+), 18346 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/2945c3b2/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
----------------------------------------------------------------------


[42/51] [abbrv] hive git commit: HIVE-13226: Improve tez print summary to print query execution breakdown (Prasanth Jayachandran reviewed by Gopal V)

Posted by jd...@apache.org.
HIVE-13226: Improve tez print summary to print query execution breakdown (Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/81282008
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/81282008
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/81282008

Branch: refs/heads/llap
Commit: 812820086098f238214adee26d65c7cb617056d3
Parents: d5b1adb
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Mar 15 13:13:29 2016 -0500
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Mar 15 13:13:29 2016 -0500

----------------------------------------------------------------------
 .../llap/io/encoded/OrcEncodedDataReader.java   |   2 +-
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  | 142 +++++++++++--------
 2 files changed, 80 insertions(+), 64 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/81282008/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index bcee56b..8111c6d 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -771,7 +771,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     } else if (!isNone) {
       count = rgCount;
     }
-    counters.setCounter(LlapIOCounters.SELECTED_ROWGROUPS, count);
+    counters.incrCounter(LlapIOCounters.SELECTED_ROWGROUPS, count);
   }
 
 

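The one-line change above swaps setCounter for incrCounter, presumably so that SELECTED_ROWGROUPS accumulates across the several splits a vertex may read instead of being overwritten by the last read. A minimal sketch of the difference, using a hypothetical Counters holder (not the actual LLAP counters API):

  import java.util.EnumMap;
  import java.util.Map;

  enum LlapIOCounters { SELECTED_ROWGROUPS }

  class Counters {
    private final Map<LlapIOCounters, Long> values = new EnumMap<>(LlapIOCounters.class);
    // setCounter overwrites: a later split erases an earlier split's count.
    void setCounter(LlapIOCounters c, long v) { values.put(c, v); }
    // incrCounter accumulates: totals from all splits survive.
    void incrCounter(LlapIOCounters c, long v) { values.merge(c, v, Long::sum); }
    long get(LlapIOCounters c) { return values.getOrDefault(c, 0L); }
  }

  class CounterDemo {
    public static void main(String[] args) {
      Counters counters = new Counters();
      counters.incrCounter(LlapIOCounters.SELECTED_ROWGROUPS, 10); // first split selects 10 row groups
      counters.incrCounter(LlapIOCounters.SELECTED_ROWGROUPS, 5);  // second split selects 5 more
      System.out.println(counters.get(LlapIOCounters.SELECTED_ROWGROUPS)); // prints 15; setCounter would leave 5
    }
  }
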
http://git-wip-us.apache.org/repos/asf/hive/blob/81282008/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
index 418a03e..67f9da8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
@@ -73,7 +73,7 @@ public class TezJobMonitor {
   private static final int COLUMN_1_WIDTH = 16;
   private static final int SEPARATOR_WIDTH = InPlaceUpdates.MIN_TERMINAL_WIDTH;
   private static final String SEPARATOR = new String(new char[SEPARATOR_WIDTH]).replace("\0", "-");
-  private static final String PREP_SUMMARY_HEADER = "DAG Preparation Summary";
+  private static final String QUERY_EXEC_SUMMARY_HEADER = "Query Execution Summary";
   private static final String TASK_SUMMARY_HEADER = "Task Execution Summary";
   private static final String LLAP_IO_SUMMARY_HEADER = "LLAP IO Summary";
 
@@ -95,9 +95,11 @@ public class TezJobMonitor {
   private static final String LLAP_SUMMARY_HEADER = String.format(LLAP_SUMMARY_HEADER_FORMAT,
       "VERTICES", "ROWGROUPS", "META_HIT", "META_MISS", "DATA_HIT", "DATA_MISS",
       "ALLOCATION", "USED", "TOTAL_IO");
-  private static final String TOTAL_PREP_TIME = "TotalPrepTime";
-  private static final String METHOD = "METHOD";
-  private static final String DURATION = "DURATION(ms)";
+
+  // Methods summary
+  private static final String OPERATION_SUMMARY = "%-35s %9s";
+  private static final String OPERATION = "OPERATION";
+  private static final String DURATION = "DURATION";
 
   // in-place progress update related variables
   private int lines;
@@ -214,6 +216,7 @@ public class TezJobMonitor {
 
     boolean running = false;
     boolean done = false;
+    boolean success = false;
     int failedCounter = 0;
     int rc = 0;
     DAGStatus.State lastState = null;
@@ -231,12 +234,12 @@ public class TezJobMonitor {
     console.printInfo("\n");
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
     perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_TO_RUNNING);
-
+    Map<String, Progress> progressMap = null;
     while (true) {
 
       try {
         status = dagClient.getDAGStatus(opts, checkInterval);
-        Map<String, Progress> progressMap = status.getVertexProgress();
+        progressMap = status.getVertexProgress();
         DAGStatus.State state = status.getState();
 
         if (state != lastState || state == RUNNING) {
@@ -277,35 +280,7 @@ public class TezJobMonitor {
             } else {
               lastReport = printStatus(progressMap, lastReport, console);
             }
-
-            /* Profile info is collected anyways, isProfileEnabled
-             * decides if it gets printed or not
-             */
-            if (isProfileEnabled) {
-
-              double duration = (System.currentTimeMillis() - startTime) / 1000.0;
-              console.printInfo("Status: DAG finished successfully in "
-                  + String.format("%.2f seconds", duration));
-              console.printInfo("\n");
-
-              console.printInfo(PREP_SUMMARY_HEADER);
-              printMethodsSummary();
-              console.printInfo(SEPARATOR);
-              console.printInfo("");
-
-              console.printInfo(TASK_SUMMARY_HEADER);
-              printDagSummary(progressMap, console, dagClient, conf, dag);
-              console.printInfo(SEPARATOR);
-              console.printInfo("");
-
-              if (llapIoEnabled) {
-                console.printInfo(LLAP_IO_SUMMARY_HEADER);
-                printLlapIOSummary(progressMap, console, dagClient);
-                console.printInfo(SEPARATOR);
-              }
-
-              console.printInfo("\n");
-            }
+            success = true;
             running = false;
             done = true;
             break;
@@ -376,6 +351,33 @@ public class TezJobMonitor {
     }
 
     perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_RUN_DAG);
+
+    if (isProfileEnabled && success && progressMap != null) {
+
+      double duration = (System.currentTimeMillis() - startTime) / 1000.0;
+      console.printInfo("Status: DAG finished successfully in "
+          + String.format("%.2f seconds", duration));
+      console.printInfo("\n");
+
+      console.printInfo(QUERY_EXEC_SUMMARY_HEADER);
+      printQueryExecutionBreakDown();
+      console.printInfo(SEPARATOR);
+      console.printInfo("");
+
+      console.printInfo(TASK_SUMMARY_HEADER);
+      printDagSummary(progressMap, console, dagClient, conf, dag);
+      console.printInfo(SEPARATOR);
+      console.printInfo("");
+
+      if (llapIoEnabled) {
+        console.printInfo(LLAP_IO_SUMMARY_HEADER);
+        printLlapIOSummary(progressMap, console, dagClient);
+        console.printInfo(SEPARATOR);
+      }
+
+      console.printInfo("\n");
+    }
+
     return rc;
   }
 
@@ -414,37 +416,44 @@ public class TezJobMonitor {
     return (tezCounter == null) ? 0 : tezCounter.getValue();
   }
 
-  private void printMethodsSummary() {
-    long totalInPrepTime = 0;
-
-    String[] perfLoggerReportMethods = {
-        (PerfLogger.PARSE),
-        (PerfLogger.ANALYZE),
-        (PerfLogger.TEZ_BUILD_DAG),
-        (PerfLogger.TEZ_SUBMIT_TO_RUNNING)
-    };
+  private void printQueryExecutionBreakDown() {
 
     /* Build the method summary header */
-    String methodBreakdownHeader = String.format("%-30s %-13s", METHOD, DURATION);
+    String execBreakdownHeader = String.format(OPERATION_SUMMARY, OPERATION, DURATION);
     console.printInfo(SEPARATOR);
-    reprintLineWithColorAsBold(methodBreakdownHeader, Ansi.Color.CYAN);
+    reprintLineWithColorAsBold(execBreakdownHeader, Ansi.Color.CYAN);
     console.printInfo(SEPARATOR);
 
-    for (String method : perfLoggerReportMethods) {
-      long duration = perfLogger.getDuration(method);
-      totalInPrepTime += duration;
-      console.printInfo(String.format("%-30s %11s", method, commaFormat.format(duration)));
-    }
-
-    /*
-     * The counters list above don't capture the total time from TimeToSubmit.startTime till
-     * TezRunDag.startTime, so calculate the duration and print it.
-     */
-    totalInPrepTime = perfLogger.getStartTime(PerfLogger.TEZ_RUN_DAG) -
-        perfLogger.getStartTime(PerfLogger.TIME_TO_SUBMIT);
+    // parse, analyze, optimize and compile
+    long compile = perfLogger.getEndTime(PerfLogger.COMPILE) -
+        perfLogger.getStartTime(PerfLogger.DRIVER_RUN);
+    console.printInfo(String.format(OPERATION_SUMMARY, "Compile Query",
+        secondsFormat.format(compile / 1000.0) + "s"));
+
+    // prepare plan for submission (building DAG, adding resources, creating scratch dirs etc.)
+    long totalDAGPrep = perfLogger.getStartTime(PerfLogger.TEZ_SUBMIT_DAG) -
+        perfLogger.getEndTime(PerfLogger.COMPILE);
+    console.printInfo(String.format(OPERATION_SUMMARY, "Prepare Plan",
+        secondsFormat.format(totalDAGPrep / 1000.0) + "s"));
+
+    // submit to accept dag (if session is closed, this will include re-opening of session time,
+    // localizing files for AM, submitting DAG)
+    long submitToAccept = perfLogger.getStartTime(PerfLogger.TEZ_RUN_DAG) -
+        perfLogger.getStartTime(PerfLogger.TEZ_SUBMIT_DAG);
+    console.printInfo(String.format(OPERATION_SUMMARY, "Submit Plan",
+        secondsFormat.format(submitToAccept / 1000.0) + "s"));
+
+    // accept to start dag (schedule wait time, resource wait time etc.)
+    long acceptToStart = perfLogger.getDuration(PerfLogger.TEZ_SUBMIT_TO_RUNNING);
+    console.printInfo(String.format(OPERATION_SUMMARY, "Start",
+        secondsFormat.format(acceptToStart / 1000.0) + "s"));
+
+    // time to actually run the dag (actual dag runtime)
+    long startToEnd = perfLogger.getEndTime(PerfLogger.TEZ_RUN_DAG) -
+        perfLogger.getEndTime(PerfLogger.TEZ_SUBMIT_TO_RUNNING);
+    console.printInfo(String.format(OPERATION_SUMMARY, "Finish",
+        secondsFormat.format(startToEnd / 1000.0) + "s"));
 
-    console.printInfo(String.format("%-30s %11s", TOTAL_PREP_TIME, commaFormat.format(
-        totalInPrepTime)));
   }
 
   private void printDagSummary(Map<String, Progress> progressMap, LogHelper console,
@@ -582,7 +591,7 @@ public class TezJobMonitor {
   }
 
   private void printLlapIOSummary(Map<String, Progress> progressMap, LogHelper console,
-      DAGClient dagClient) throws Exception {
+      DAGClient dagClient) {
     SortedSet<String> keys = new TreeSet<>(progressMap.keySet());
     Set<StatusGetOpts> statusOptions = new HashSet<>(1);
     statusOptions.add(StatusGetOpts.GET_COUNTERS);
@@ -593,8 +602,15 @@ public class TezJobMonitor {
       if (vertexName.startsWith("Reducer")) {
         continue;
       }
-      TezCounters vertexCounters = dagClient.getVertexStatus(vertexName, statusOptions)
-          .getVertexCounters();
+      TezCounters vertexCounters = null;
+      try {
+        vertexCounters = dagClient.getVertexStatus(vertexName, statusOptions)
+            .getVertexCounters();
+      } catch (IOException e) {
+        // best attempt, shouldn't really kill DAG for this
+      } catch (TezException e) {
+        // best attempt, shouldn't really kill DAG for this
+      }
       if (vertexCounters != null) {
         final long selectedRowgroups = getCounterValueByGroupName(vertexCounters,
             counterGroup, LlapIOCounters.SELECTED_ROWGROUPS.name());

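The replacement printQueryExecutionBreakDown derives five phases purely from PerfLogger timestamps: Compile Query (DRIVER_RUN start to COMPILE end), Prepare Plan (COMPILE end to TEZ_SUBMIT_DAG start), Submit Plan (TEZ_SUBMIT_DAG start to TEZ_RUN_DAG start), Start (the TEZ_SUBMIT_TO_RUNNING interval), and Finish (TEZ_SUBMIT_TO_RUNNING end to TEZ_RUN_DAG end). A runnable sketch of that arithmetic, with a hypothetical TimedLog stand-in for Hive's real PerfLogger:

  import java.util.HashMap;
  import java.util.Map;

  class TimedLog {
    private final Map<String, Long> starts = new HashMap<>();
    private final Map<String, Long> ends = new HashMap<>();
    void begin(String k) { starts.put(k, System.currentTimeMillis()); }
    void end(String k)   { ends.put(k, System.currentTimeMillis()); }
    long startTime(String k) { return starts.get(k); }
    long endTime(String k)   { return ends.get(k); }
  }

  class BreakdownDemo {
    public static void main(String[] args) throws InterruptedException {
      TimedLog log = new TimedLog();
      log.begin("DRIVER_RUN");
      Thread.sleep(30); log.end("COMPILE");           // parse/analyze/optimize/compile done
      Thread.sleep(20); log.begin("TEZ_SUBMIT_DAG");  // plan built, resources added
      Thread.sleep(10); log.begin("TEZ_RUN_DAG");     // DAG accepted by AM
      log.begin("TEZ_SUBMIT_TO_RUNNING");
      Thread.sleep(10); log.end("TEZ_SUBMIT_TO_RUNNING"); // DAG actually running
      Thread.sleep(40); log.end("TEZ_RUN_DAG");            // DAG finished

      long compile = log.endTime("COMPILE") - log.startTime("DRIVER_RUN");
      long prepare = log.startTime("TEZ_SUBMIT_DAG") - log.endTime("COMPILE");
      long submit  = log.startTime("TEZ_RUN_DAG") - log.startTime("TEZ_SUBMIT_DAG");
      long start   = log.endTime("TEZ_SUBMIT_TO_RUNNING") - log.startTime("TEZ_SUBMIT_TO_RUNNING");
      long finish  = log.endTime("TEZ_RUN_DAG") - log.endTime("TEZ_SUBMIT_TO_RUNNING");
      System.out.printf("%-35s %9s%n", "Compile Query", compile / 1000.0 + "s");
      System.out.printf("%-35s %9s%n", "Prepare Plan",  prepare / 1000.0 + "s");
      System.out.printf("%-35s %9s%n", "Submit Plan",   submit / 1000.0 + "s");
      System.out.printf("%-35s %9s%n", "Start",         start / 1000.0 + "s");
      System.out.printf("%-35s %9s%n", "Finish",        finish / 1000.0 + "s");
    }
  }

Note also that the patch moves the summary printing after the monitoring loop (guarded by success and a non-null progressMap) and has printLlapIOSummary swallow IOException and TezException from dagClient.getVertexStatus, so a failed counter fetch degrades the summary instead of failing an already-successful DAG.
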

[16/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_interval_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_interval_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_interval_mapjoin.q.out
index ce79ad1..4775167 100644
--- a/ql/src/test/results/clientpositive/tez/vector_interval_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_interval_mapjoin.q.out
@@ -54,6 +54,19 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@vectortab_a_1k
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vectortab_a_1korc
+POSTHOOK: Lineage: vectortab_a_1korc.b SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.bo SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.d SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.dc SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.dt SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.f SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.i SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.s SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.s2 SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.si SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.t SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.ts SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab_a_1korc.ts2 SIMPLE [(vectortab_a_1k)vectortab_a_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
 PREHOOK: query: create table vectortab_b_1k(
             t tinyint,
             si smallint,
@@ -110,6 +123,19 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@vectortab_b_1k
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@vectortab_b_1korc
+POSTHOOK: Lineage: vectortab_b_1korc.b SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.bo SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.d SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.dc SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.dt SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.f SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.i SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.s SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.s2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.si SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.t SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.ts SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab_b_1korc.ts2 SIMPLE [(vectortab_b_1k)vectortab_b_1k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
 PREHOOK: query: explain
 select
    v1.s,

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_join30.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join30.q.out b/ql/src/test/results/clientpositive/tez/vector_join30.q.out
index b3deef5..dd5b5aa 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join30.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join30.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orcsrc
+POSTHOOK: Lineage: orcsrc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: orcsrc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain
 FROM 
 (SELECT orcsrc.* FROM orcsrc sort by key) x

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out b/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
index cc8122a..d50e079 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join_filters.q.out
@@ -28,6 +28,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[19][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value))  FROM myinput1 a JOIN myinput1 b on a.key > 40 AND a.value > 50 AND a.key = a.value AND b.key > 40 AND b.value > 50 AND b.key = b.value
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out b/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
index 3c9ce0a..97b3242 100644
--- a/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_join_nulls.q.out
@@ -28,6 +28,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Map 1' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a JOIN myinput1 b
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
index 59499c7..d7bf9af 100644
--- a/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_leftsemi_mapjoin.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from t1 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -41,6 +43,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t2 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t2
@@ -72,6 +76,8 @@ POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t3
+POSTHOOK: Lineage: t3.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), (t2)t2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t3.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), (t2)t2.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t3 sort by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_multi_insert.q.out b/ql/src/test/results/clientpositive/tez/vector_multi_insert.q.out
index f72949c..e940f22 100644
--- a/ql/src/test/results/clientpositive/tez/vector_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_multi_insert.q.out
@@ -32,6 +32,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@orc1
+POSTHOOK: Lineage: orc1.rn EXPRESSION []
 PREHOOK: query: create table orc_rn1 (rn int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_nullsafe_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_nullsafe_join.q.out b/ql/src/test/results/clientpositive/tez/vector_nullsafe_join.q.out
index 75304dc..045b687 100644
--- a/ql/src/test/results/clientpositive/tez/vector_nullsafe_join.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_nullsafe_join.q.out
@@ -46,6 +46,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@myinput1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@myinput1
+POSTHOOK: Lineage: myinput1.key SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: myinput1.value SIMPLE [(myinput1_txt)myinput1_txt.FieldSchema(name:value, type:int, comment:null), ]
 PREHOOK: query: -- merging
 explain select * from myinput1 a join myinput1 b on a.key<=>b.value
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
index 4e2e62c..946a558 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join1.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -117,6 +165,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
index 20c5ac8..1998344 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join2.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
index 9fab2c7..f20163b 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join3.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
index a6690b6..90a9efb 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join4.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1b
+POSTHOOK: Lineage: small_alltypesorc1b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2b
+POSTHOOK: Lineage: small_alltypesorc2b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3b
+POSTHOOK: Lineage: small_alltypesorc3b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4b
+POSTHOOK: Lineage: small_alltypesorc4b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b
@@ -132,6 +180,18 @@ POSTHOOK: Input: default@small_alltypesorc3b
 POSTHOOK: Input: default@small_alltypesorc4b
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_b
+POSTHOOK: Lineage: small_alltypesorc_b.cbigint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cdouble EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cfloat EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.csmallint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctinyint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_b

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
index a0bca51..c1c251f 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join5.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sorted_mod_4
+POSTHOOK: Lineage: sorted_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: sorted_mod_4.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sorted_mod_4
@@ -46,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table
+POSTHOOK: Lineage: small_table.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table
@@ -684,6 +688,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mod_8_mod_4
+POSTHOOK: Lineage: mod_8_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: mod_8_mod_4.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mod_8_mod_4
@@ -712,6 +718,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table2
+POSTHOOK: Lineage: small_table2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table2.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_outer_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/tez/vector_outer_join6.q.out
index acf8f3c..bdcdc42 100644
--- a/ql/src/test/results/clientpositive/tez/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_outer_join6.q.out
@@ -84,6 +84,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c2, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin2_txt
@@ -94,6 +97,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin2_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN3 stored as orc AS SELECT * FROM TJOIN3_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin3_txt
@@ -104,6 +110,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin3_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN3
+POSTHOOK: Lineage: tjoin3.c1 SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin3.c2 SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin3.rnum SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN4 stored as orc AS SELECT * FROM TJOIN4_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin4_txt
@@ -114,6 +123,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin4_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN4
+POSTHOOK: Lineage: tjoin4.c1 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin4.c2 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin4.rnum SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: explain
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
index b95df67..13e70dd 100644
--- a/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_partitioned_date_time.q.out
@@ -52,6 +52,12 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@flights_tiny
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@flights_tiny_orc
+POSTHOOK: Lineage: flights_tiny_orc.arr_delay SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:arr_delay, type:float, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.dest_city_name SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:dest_city_name, type:string, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_date SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:fl_date, type:date, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_num SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:fl_num, type:int, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_time EXPRESSION [(flights_tiny)flights_tiny.FieldSchema(name:fl_date, type:date, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.origin_city_name SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:origin_city_name, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM flights_tiny_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@flights_tiny_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
index b873355..8c58232 100644
--- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out
@@ -10,6 +10,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN
 SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
 WHERE cdecimal1 is not null and cdecimal2 is not null

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vector_varchar_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/tez/vector_varchar_mapjoin1.q.out
index 32e6fc1..d50d875 100644
--- a/ql/src/test/results/clientpositive/tez/vector_varchar_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_varchar_mapjoin1.q.out
@@ -98,6 +98,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_vc1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_vc1_orc
+POSTHOOK: Lineage: varchar_join1_vc1_orc.c1 SIMPLE [(varchar_join1_vc1)varchar_join1_vc1.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_vc1_orc.c2 SIMPLE [(varchar_join1_vc1)varchar_join1_vc1.FieldSchema(name:c2, type:varchar(10), comment:null), ]
 PREHOOK: query: create table varchar_join1_vc2_orc stored as orc as select * from varchar_join1_vc2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@varchar_join1_vc2
@@ -108,6 +110,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_vc2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_vc2_orc
+POSTHOOK: Lineage: varchar_join1_vc2_orc.c1 SIMPLE [(varchar_join1_vc2)varchar_join1_vc2.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_vc2_orc.c2 SIMPLE [(varchar_join1_vc2)varchar_join1_vc2.FieldSchema(name:c2, type:varchar(20), comment:null), ]
 PREHOOK: query: create table varchar_join1_str_orc stored as orc as select * from varchar_join1_str
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@varchar_join1_str
@@ -118,6 +122,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_str
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_str_orc
+POSTHOOK: Lineage: varchar_join1_str_orc.c1 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_str_orc.c2 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join varchar with same length varchar
 explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
index c20033c..9a6cb52 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@date_decimal_test
+POSTHOOK: Lineage: date_decimal_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdecimal EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
index dae637b..32fdef6 100644
--- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
@@ -3040,6 +3040,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesnull
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesnullorc
+POSTHOOK: Lineage: alltypesnullorc.cbigint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cdouble SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cfloat SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.csmallint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctinyint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: explain
 select count(*) from alltypesnullorc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index e1c76f5..f3e31d4 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -132,6 +132,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date
+POSTHOOK: Lineage: srcpart_date.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -150,6 +152,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_hour
+POSTHOOK: Lineage: srcpart_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -168,6 +172,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_date_hour
+POSTHOOK: Lineage: srcpart_date_hour.date SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.ds SIMPLE [(srcpart)srcpart.FieldSchema(name:ds, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_date_hour.hr SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@srcpart
@@ -186,6 +194,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@srcpart_double_hour
+POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
+POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ]
 PREHOOK: query: -- single column, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
 PREHOOK: type: QUERY

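All of the golden-file hunks above share one shape: each CREATE TABLE AS SELECT now records a POSTHOOK Lineage line per target column, tagged SIMPLE when the column is copied unchanged from a single source column and EXPRESSION when it is computed (sorted_mod_4.cmodint and srcpart_double_hour.hr above, for example, both derive from arithmetic over a source column). As a reading aid, here is a small Python sketch that extracts those records from a local q.out file; the file path and helper names are illustrative, not part of the patch.

    import re
    from collections import namedtuple

    # One record per "POSTHOOK: Lineage: <table>.<column> <KIND> [<sources>]" line.
    Lineage = namedtuple("Lineage", "table column kind sources")

    LINEAGE_LINE = re.compile(
        r"^\+?POSTHOOK: Lineage: (?P<table>\w+)\.(?P<column>\w+) "
        r"(?P<kind>SIMPLE|EXPRESSION) \[(?P<sources>.*)\]\s*$")

    def parse_lineage(path):
        """Yield Lineage records from a clientpositive .q.out golden file."""
        with open(path) as fh:
            for line in fh:
                m = LINEAGE_LINE.match(line.rstrip("\n"))
                if m is None:
                    continue
                # Each source fragment looks like
                # "(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null)"
                sources = [s.strip() for s in m.group("sources").split("), ") if s.strip()]
                yield Lineage(m.group("table"), m.group("column"), m.group("kind"), sources)

    if __name__ == "__main__":
        # Hypothetical local path; any of the files touched above would do.
        for rec in parse_lineage("vector_outer_join3.q.out"):
            print(rec.table, rec.column, rec.kind, len(rec.sources))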

[02/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 0da1acf..ac8d8a4 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -954,6 +954,56 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
+  def add_token(self, token_identifier, delegation_token):
+    """
+    Parameters:
+     - token_identifier
+     - delegation_token
+    """
+    pass
+
+  def remove_token(self, token_identifier):
+    """
+    Parameters:
+     - token_identifier
+    """
+    pass
+
+  def get_token(self, token_identifier):
+    """
+    Parameters:
+     - token_identifier
+    """
+    pass
+
+  def get_all_token_identifiers(self):
+    pass
+
+  def add_master_key(self, key):
+    """
+    Parameters:
+     - key
+    """
+    pass
+
+  def update_master_key(self, seq_number, key):
+    """
+    Parameters:
+     - seq_number
+     - key
+    """
+    pass
+
+  def remove_master_key(self, key_seq):
+    """
+    Parameters:
+     - key_seq
+    """
+    pass
+
+  def get_master_keys(self):
+    pass
+
   def get_open_txns(self):
     pass
 
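Before the Client-side implementations in the hunks below, a minimal sketch of how these new interface methods might be exercised from the generated Python bindings. The host, port, token identifier, and key strings are hand-written placeholders; in the real HS2 flow the identifier and delegation-token blobs come from the delegation token secret manager, and add_master_key is assumed here to return the new key's sequence number.

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    # Placeholder endpoint for a local metastore; adjust as needed.
    transport = TTransport.TBufferedTransport(TSocket.TSocket("localhost", 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()

    # Token round trip. Both arguments are strings; the values below are
    # placeholders, not real encoded identifiers or token blobs.
    client.add_token("sample-token-id", "base64-token-blob")
    print(client.get_token("sample-token-id"))
    print(client.get_all_token_identifiers())
    client.remove_token("sample-token-id")

    # Master key round trip; add_master_key is assumed to return the new
    # key's sequence number, which update/remove then take back.
    seq = client.add_master_key("initial-key-material")
    client.update_master_key(seq, "rotated-key-material")
    print(client.get_master_keys())
    client.remove_master_key(seq)

    transport.close()
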
@@ -5306,6 +5356,252 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o1
     return
 
+  def add_token(self, token_identifier, delegation_token):
+    """
+    Parameters:
+     - token_identifier
+     - delegation_token
+    """
+    self.send_add_token(token_identifier, delegation_token)
+    return self.recv_add_token()
+
+  def send_add_token(self, token_identifier, delegation_token):
+    self._oprot.writeMessageBegin('add_token', TMessageType.CALL, self._seqid)
+    args = add_token_args()
+    args.token_identifier = token_identifier
+    args.delegation_token = delegation_token
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_add_token(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = add_token_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "add_token failed: unknown result")
+
+  def remove_token(self, token_identifier):
+    """
+    Parameters:
+     - token_identifier
+    """
+    self.send_remove_token(token_identifier)
+    return self.recv_remove_token()
+
+  def send_remove_token(self, token_identifier):
+    self._oprot.writeMessageBegin('remove_token', TMessageType.CALL, self._seqid)
+    args = remove_token_args()
+    args.token_identifier = token_identifier
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_remove_token(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = remove_token_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "remove_token failed: unknown result")
+
+  def get_token(self, token_identifier):
+    """
+    Parameters:
+     - token_identifier
+    """
+    self.send_get_token(token_identifier)
+    return self.recv_get_token()
+
+  def send_get_token(self, token_identifier):
+    self._oprot.writeMessageBegin('get_token', TMessageType.CALL, self._seqid)
+    args = get_token_args()
+    args.token_identifier = token_identifier
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_token(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_token_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_token failed: unknown result")
+
+  def get_all_token_identifiers(self):
+    self.send_get_all_token_identifiers()
+    return self.recv_get_all_token_identifiers()
+
+  def send_get_all_token_identifiers(self):
+    self._oprot.writeMessageBegin('get_all_token_identifiers', TMessageType.CALL, self._seqid)
+    args = get_all_token_identifiers_args()
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_all_token_identifiers(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_all_token_identifiers_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_token_identifiers failed: unknown result")
+
+  def add_master_key(self, key):
+    """
+    Parameters:
+     - key
+    """
+    self.send_add_master_key(key)
+    return self.recv_add_master_key()
+
+  def send_add_master_key(self, key):
+    self._oprot.writeMessageBegin('add_master_key', TMessageType.CALL, self._seqid)
+    args = add_master_key_args()
+    args.key = key
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_add_master_key(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = add_master_key_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    if result.o1 is not None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "add_master_key failed: unknown result")
+
+  def update_master_key(self, seq_number, key):
+    """
+    Parameters:
+     - seq_number
+     - key
+    """
+    self.send_update_master_key(seq_number, key)
+    self.recv_update_master_key()
+
+  def send_update_master_key(self, seq_number, key):
+    self._oprot.writeMessageBegin('update_master_key', TMessageType.CALL, self._seqid)
+    args = update_master_key_args()
+    args.seq_number = seq_number
+    args.key = key
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_update_master_key(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = update_master_key_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.o1 is not None:
+      raise result.o1
+    if result.o2 is not None:
+      raise result.o2
+    return
+
+  def remove_master_key(self, key_seq):
+    """
+    Parameters:
+     - key_seq
+    """
+    self.send_remove_master_key(key_seq)
+    return self.recv_remove_master_key()
+
+  def send_remove_master_key(self, key_seq):
+    self._oprot.writeMessageBegin('remove_master_key', TMessageType.CALL, self._seqid)
+    args = remove_master_key_args()
+    args.key_seq = key_seq
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_remove_master_key(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = remove_master_key_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "remove_master_key failed: unknown result")
+
+  def get_master_keys(self):
+    self.send_get_master_keys()
+    return self.recv_get_master_keys()
+
+  def send_get_master_keys(self):
+    self._oprot.writeMessageBegin('get_master_keys', TMessageType.CALL, self._seqid)
+    args = get_master_keys_args()
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_master_keys(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_master_keys_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_master_keys failed: unknown result")
+
   def get_open_txns(self):
     self.send_get_open_txns()
     return self.recv_get_open_txns()
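
All of the Client methods above follow the same send/recv shape: serialize an
args struct as a CALL message, flush the transport, then block reading the
matching result struct. A usage sketch for the new calls follows; the host,
port 9083, and plain buffered transport are assumptions for a simple insecure
setup, and a kerberized metastore would need a SASL transport instead.

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore  # generated module

    transport = TTransport.TBufferedTransport(
        TSocket.TSocket('metastore-host', 9083))
    client = ThriftHiveMetastore.Client(
        TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
      # Round-trip a delegation token through the new API.
      client.add_token('ident-1', 'token-bytes-base64')
      assert client.get_token('ident-1') == 'token-bytes-base64'
      print(client.get_all_token_identifiers())
      client.remove_token('ident-1')
    finally:
      transport.close()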
@@ -6161,6 +6457,14 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     self._processMap["get_delegation_token"] = Processor.process_get_delegation_token
     self._processMap["renew_delegation_token"] = Processor.process_renew_delegation_token
     self._processMap["cancel_delegation_token"] = Processor.process_cancel_delegation_token
+    self._processMap["add_token"] = Processor.process_add_token
+    self._processMap["remove_token"] = Processor.process_remove_token
+    self._processMap["get_token"] = Processor.process_get_token
+    self._processMap["get_all_token_identifiers"] = Processor.process_get_all_token_identifiers
+    self._processMap["add_master_key"] = Processor.process_add_master_key
+    self._processMap["update_master_key"] = Processor.process_update_master_key
+    self._processMap["remove_master_key"] = Processor.process_remove_master_key
+    self._processMap["get_master_keys"] = Processor.process_get_master_keys
     self._processMap["get_open_txns"] = Processor.process_get_open_txns
     self._processMap["get_open_txns_info"] = Processor.process_get_open_txns_info
     self._processMap["open_txns"] = Processor.process_open_txns
@@ -9027,13 +9331,13 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_get_open_txns(self, seqid, iprot, oprot):
-    args = get_open_txns_args()
+  def process_add_token(self, seqid, iprot, oprot):
+    args = add_token_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = get_open_txns_result()
+    result = add_token_result()
     try:
-      result.success = self._handler.get_open_txns()
+      result.success = self._handler.add_token(args.token_identifier, args.delegation_token)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
@@ -9041,18 +9345,18 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("get_open_txns", msg_type, seqid)
+    oprot.writeMessageBegin("add_token", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_get_open_txns_info(self, seqid, iprot, oprot):
-    args = get_open_txns_info_args()
+  def process_remove_token(self, seqid, iprot, oprot):
+    args = remove_token_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = get_open_txns_info_result()
+    result = remove_token_result()
     try:
-      result.success = self._handler.get_open_txns_info()
+      result.success = self._handler.remove_token(args.token_identifier)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
@@ -9060,18 +9364,18 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("get_open_txns_info", msg_type, seqid)
+    oprot.writeMessageBegin("remove_token", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_open_txns(self, seqid, iprot, oprot):
-    args = open_txns_args()
+  def process_get_token(self, seqid, iprot, oprot):
+    args = get_token_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = open_txns_result()
+    result = get_token_result()
     try:
-      result.success = self._handler.open_txns(args.rqst)
+      result.success = self._handler.get_token(args.token_identifier)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
@@ -9079,112 +9383,273 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("open_txns", msg_type, seqid)
+    oprot.writeMessageBegin("get_token", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_abort_txn(self, seqid, iprot, oprot):
-    args = abort_txn_args()
+  def process_get_all_token_identifiers(self, seqid, iprot, oprot):
+    args = get_all_token_identifiers_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = abort_txn_result()
+    result = get_all_token_identifiers_result()
     try:
-      self._handler.abort_txn(args.rqst)
+      result.success = self._handler.get_all_token_identifiers()
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
-    except NoSuchTxnException as o1:
-      msg_type = TMessageType.REPLY
-      result.o1 = o1
     except Exception as ex:
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("abort_txn", msg_type, seqid)
+    oprot.writeMessageBegin("get_all_token_identifiers", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_commit_txn(self, seqid, iprot, oprot):
-    args = commit_txn_args()
+  def process_add_master_key(self, seqid, iprot, oprot):
+    args = add_master_key_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = commit_txn_result()
+    result = add_master_key_result()
     try:
-      self._handler.commit_txn(args.rqst)
+      result.success = self._handler.add_master_key(args.key)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
-    except NoSuchTxnException as o1:
+    except MetaException as o1:
       msg_type = TMessageType.REPLY
       result.o1 = o1
-    except TxnAbortedException as o2:
-      msg_type = TMessageType.REPLY
-      result.o2 = o2
     except Exception as ex:
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("commit_txn", msg_type, seqid)
+    oprot.writeMessageBegin("add_master_key", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_lock(self, seqid, iprot, oprot):
-    args = lock_args()
+  def process_update_master_key(self, seqid, iprot, oprot):
+    args = update_master_key_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = lock_result()
+    result = update_master_key_result()
     try:
-      result.success = self._handler.lock(args.rqst)
+      self._handler.update_master_key(args.seq_number, args.key)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
-    except NoSuchTxnException as o1:
+    except NoSuchObjectException as o1:
       msg_type = TMessageType.REPLY
       result.o1 = o1
-    except TxnAbortedException as o2:
+    except MetaException as o2:
       msg_type = TMessageType.REPLY
       result.o2 = o2
     except Exception as ex:
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("lock", msg_type, seqid)
+    oprot.writeMessageBegin("update_master_key", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_check_lock(self, seqid, iprot, oprot):
-    args = check_lock_args()
+  def process_remove_master_key(self, seqid, iprot, oprot):
+    args = remove_master_key_args()
     args.read(iprot)
     iprot.readMessageEnd()
-    result = check_lock_result()
+    result = remove_master_key_result()
     try:
-      result.success = self._handler.check_lock(args.rqst)
+      result.success = self._handler.remove_master_key(args.key_seq)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
-    except NoSuchTxnException as o1:
-      msg_type = TMessageType.REPLY
-      result.o1 = o1
-    except TxnAbortedException as o2:
-      msg_type = TMessageType.REPLY
-      result.o2 = o2
-    except NoSuchLockException as o3:
-      msg_type = TMessageType.REPLY
-      result.o3 = o3
     except Exception as ex:
       msg_type = TMessageType.EXCEPTION
       logging.exception(ex)
       result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
-    oprot.writeMessageBegin("check_lock", msg_type, seqid)
+    oprot.writeMessageBegin("remove_master_key", msg_type, seqid)
     result.write(oprot)
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
-  def process_unlock(self, seqid, iprot, oprot):
+  def process_get_master_keys(self, seqid, iprot, oprot):
+    args = get_master_keys_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_master_keys_result()
+    try:
+      result.success = self._handler.get_master_keys()
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_master_keys", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_open_txns(self, seqid, iprot, oprot):
+    args = get_open_txns_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_open_txns_result()
+    try:
+      result.success = self._handler.get_open_txns()
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_open_txns", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_open_txns_info(self, seqid, iprot, oprot):
+    args = get_open_txns_info_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_open_txns_info_result()
+    try:
+      result.success = self._handler.get_open_txns_info()
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_open_txns_info", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_open_txns(self, seqid, iprot, oprot):
+    args = open_txns_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = open_txns_result()
+    try:
+      result.success = self._handler.open_txns(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("open_txns", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_abort_txn(self, seqid, iprot, oprot):
+    args = abort_txn_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = abort_txn_result()
+    try:
+      self._handler.abort_txn(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchTxnException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("abort_txn", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_commit_txn(self, seqid, iprot, oprot):
+    args = commit_txn_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = commit_txn_result()
+    try:
+      self._handler.commit_txn(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchTxnException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except TxnAbortedException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("commit_txn", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_lock(self, seqid, iprot, oprot):
+    args = lock_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = lock_result()
+    try:
+      result.success = self._handler.lock(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchTxnException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except TxnAbortedException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("lock", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_check_lock(self, seqid, iprot, oprot):
+    args = check_lock_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = check_lock_result()
+    try:
+      result.success = self._handler.check_lock(args.rqst)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except NoSuchTxnException as o1:
+      msg_type = TMessageType.REPLY
+      result.o1 = o1
+    except TxnAbortedException as o2:
+      msg_type = TMessageType.REPLY
+      result.o2 = o2
+    except NoSuchLockException as o3:
+      msg_type = TMessageType.REPLY
+      result.o3 = o3
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("check_lock", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_unlock(self, seqid, iprot, oprot):
     args = unlock_args()
     args.read(iprot)
     iprot.readMessageEnd()
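
Most of this hunk is pre-existing generated code shifting down the file; only
the eight process_* bodies for the token and master-key calls are new, and
they all instantiate one template: exceptions declared in the IDL travel back
as a normal REPLY with the matching result field set, transport failures and
interrupts re-raise so the server can tear down the connection, and anything
unexpected is logged and surfaced as an INTERNAL_ERROR TApplicationException.
The template, with hypothetical example_args/example_result placeholders:

    import logging
    from thrift.Thrift import TMessageType, TApplicationException
    from thrift.transport import TTransport
    from hive_metastore.ttypes import MetaException

    def process_example(self, seqid, iprot, oprot):
      args = example_args()
      args.read(iprot)
      iprot.readMessageEnd()
      result = example_result()
      try:
        result.success = self._handler.example(args.param)
        msg_type = TMessageType.REPLY
      except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise                       # let the server drop the connection
      except MetaException as o1:   # declared in the IDL: still a REPLY
        msg_type = TMessageType.REPLY
        result.o1 = o1
      except Exception as ex:       # undeclared: RPC-level internal error
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR,
                                       'Internal error')
      oprot.writeMessageBegin("example", msg_type, seqid)
      result.write(oprot)
      oprot.writeMessageEnd()
      oprot.trans.flush()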
@@ -29618,6 +30083,1072 @@ class cancel_delegation_token_result:
   def __ne__(self, other):
     return not (self == other)
 
+class add_token_args:
+  """
+  Attributes:
+   - token_identifier
+   - delegation_token
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'token_identifier', None, None, ), # 1
+    (2, TType.STRING, 'delegation_token', None, None, ), # 2
+  )
+
+  def __init__(self, token_identifier=None, delegation_token=None,):
+    self.token_identifier = token_identifier
+    self.delegation_token = delegation_token
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.token_identifier = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.delegation_token = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('add_token_args')
+    if self.token_identifier is not None:
+      oprot.writeFieldBegin('token_identifier', TType.STRING, 1)
+      oprot.writeString(self.token_identifier)
+      oprot.writeFieldEnd()
+    if self.delegation_token is not None:
+      oprot.writeFieldBegin('delegation_token', TType.STRING, 2)
+      oprot.writeString(self.delegation_token)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.token_identifier)
+    value = (value * 31) ^ hash(self.delegation_token)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class add_token_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.BOOL, 'success', None, None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.BOOL:
+          self.success = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('add_token_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.BOOL, 0)
+      oprot.writeBool(self.success)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class remove_token_args:
+  """
+  Attributes:
+   - token_identifier
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'token_identifier', None, None, ), # 1
+  )
+
+  def __init__(self, token_identifier=None,):
+    self.token_identifier = token_identifier
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.token_identifier = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('remove_token_args')
+    if self.token_identifier is not None:
+      oprot.writeFieldBegin('token_identifier', TType.STRING, 1)
+      oprot.writeString(self.token_identifier)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.token_identifier)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class remove_token_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.BOOL, 'success', None, None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.BOOL:
+          self.success = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('remove_token_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.BOOL, 0)
+      oprot.writeBool(self.success)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_token_args:
+  """
+  Attributes:
+   - token_identifier
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'token_identifier', None, None, ), # 1
+  )
+
+  def __init__(self, token_identifier=None,):
+    self.token_identifier = token_identifier
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.token_identifier = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_token_args')
+    if self.token_identifier is not None:
+      oprot.writeFieldBegin('token_identifier', TType.STRING, 1)
+      oprot.writeString(self.token_identifier)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.token_identifier)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_token_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.STRING, 'success', None, None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.STRING:
+          self.success = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_token_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.STRING, 0)
+      oprot.writeString(self.success)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_all_token_identifiers_args:
+
+  thrift_spec = (
+  )
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_all_token_identifiers_args')
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_all_token_identifiers_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype950, _size947) = iprot.readListBegin()
+          for _i951 in xrange(_size947):
+            _elem952 = iprot.readString()
+            self.success.append(_elem952)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_all_token_identifiers_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRING, len(self.success))
+      for iter953 in self.success:
+        oprot.writeString(iter953)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class add_master_key_args:
+  """
+  Attributes:
+   - key
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'key', None, None, ), # 1
+  )
+
+  def __init__(self, key=None,):
+    self.key = key
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.key = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('add_master_key_args')
+    if self.key is not None:
+      oprot.writeFieldBegin('key', TType.STRING, 1)
+      oprot.writeString(self.key)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.key)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class add_master_key_result:
+  """
+  Attributes:
+   - success
+   - o1
+  """
+
+  thrift_spec = (
+    (0, TType.I32, 'success', None, None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, success=None, o1=None,):
+    self.success = success
+    self.o1 = o1
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.I32:
+          self.success = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('add_master_key_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.I32, 0)
+      oprot.writeI32(self.success)
+      oprot.writeFieldEnd()
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    value = (value * 31) ^ hash(self.o1)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class update_master_key_args:
+  """
+  Attributes:
+   - seq_number
+   - key
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'seq_number', None, None, ), # 1
+    (2, TType.STRING, 'key', None, None, ), # 2
+  )
+
+  def __init__(self, seq_number=None, key=None,):
+    self.seq_number = seq_number
+    self.key = key
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.seq_number = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.key = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('update_master_key_args')
+    if self.seq_number is not None:
+      oprot.writeFieldBegin('seq_number', TType.I32, 1)
+      oprot.writeI32(self.seq_number)
+      oprot.writeFieldEnd()
+    if self.key is not None:
+      oprot.writeFieldBegin('key', TType.STRING, 2)
+      oprot.writeString(self.key)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.seq_number)
+    value = (value * 31) ^ hash(self.key)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class update_master_key_result:
+  """
+  Attributes:
+   - o1
+   - o2
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1
+    (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2
+  )
+
+  def __init__(self, o1=None, o2=None,):
+    self.o1 = o1
+    self.o2 = o2
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = NoSuchObjectException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRUCT:
+          self.o2 = MetaException()
+          self.o2.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('update_master_key_result')
+    if self.o1 is not None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    if self.o2 is not None:
+      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
+      self.o2.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.o1)
+    value = (value * 31) ^ hash(self.o2)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class remove_master_key_args:
+  """
+  Attributes:
+   - key_seq
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.I32, 'key_seq', None, None, ), # 1
+  )
+
+  def __init__(self, key_seq=None,):
+    self.key_seq = key_seq
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.I32:
+          self.key_seq = iprot.readI32()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('remove_master_key_args')
+    if self.key_seq is not None:
+      oprot.writeFieldBegin('key_seq', TType.I32, 1)
+      oprot.writeI32(self.key_seq)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.key_seq)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class remove_master_key_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.BOOL, 'success', None, None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.BOOL:
+          self.success = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('remove_master_key_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.BOOL, 0)
+      oprot.writeBool(self.success)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_master_keys_args:
+
+  thrift_spec = (
+  )
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_master_keys_args')
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_master_keys_result:
+  """
+  Attributes:
+   - success
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+  )
+
+  def __init__(self, success=None,):
+    self.success = success
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype957, _size954) = iprot.readListBegin()
+          for _i958 in xrange(_size954):
+            _elem959 = iprot.readString()
+            self.success.append(_elem959)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_master_keys_result')
+    if self.success is not None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRING, len(self.success))
+      for iter960 in self.success:
+        oprot.writeString(iter960)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.success)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class get_open_txns_args:
 
   thrift_spec = (

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index de316ae..e782bb5 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1894,6 +1894,128 @@ module ThriftHiveMetastore
       return
     end
 
+    def add_token(token_identifier, delegation_token)
+      send_add_token(token_identifier, delegation_token)
+      return recv_add_token()
+    end
+
+    def send_add_token(token_identifier, delegation_token)
+      send_message('add_token', Add_token_args, :token_identifier => token_identifier, :delegation_token => delegation_token)
+    end
+
+    def recv_add_token()
+      result = receive_message(Add_token_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_token failed: unknown result')
+    end
+
+    def remove_token(token_identifier)
+      send_remove_token(token_identifier)
+      return recv_remove_token()
+    end
+
+    def send_remove_token(token_identifier)
+      send_message('remove_token', Remove_token_args, :token_identifier => token_identifier)
+    end
+
+    def recv_remove_token()
+      result = receive_message(Remove_token_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'remove_token failed: unknown result')
+    end
+
+    def get_token(token_identifier)
+      send_get_token(token_identifier)
+      return recv_get_token()
+    end
+
+    def send_get_token(token_identifier)
+      send_message('get_token', Get_token_args, :token_identifier => token_identifier)
+    end
+
+    def recv_get_token()
+      result = receive_message(Get_token_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_token failed: unknown result')
+    end
+
+    def get_all_token_identifiers()
+      send_get_all_token_identifiers()
+      return recv_get_all_token_identifiers()
+    end
+
+    def send_get_all_token_identifiers()
+      send_message('get_all_token_identifiers', Get_all_token_identifiers_args)
+    end
+
+    def recv_get_all_token_identifiers()
+      result = receive_message(Get_all_token_identifiers_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_token_identifiers failed: unknown result')
+    end
+
+    def add_master_key(key)
+      send_add_master_key(key)
+      return recv_add_master_key()
+    end
+
+    def send_add_master_key(key)
+      send_message('add_master_key', Add_master_key_args, :key => key)
+    end
+
+    def recv_add_master_key()
+      result = receive_message(Add_master_key_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_master_key failed: unknown result')
+    end
+
+    def update_master_key(seq_number, key)
+      send_update_master_key(seq_number, key)
+      recv_update_master_key()
+    end
+
+    def send_update_master_key(seq_number, key)
+      send_message('update_master_key', Update_master_key_args, :seq_number => seq_number, :key => key)
+    end
+
+    def recv_update_master_key()
+      result = receive_message(Update_master_key_result)
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      return
+    end
+
+    def remove_master_key(key_seq)
+      send_remove_master_key(key_seq)
+      return recv_remove_master_key()
+    end
+
+    def send_remove_master_key(key_seq)
+      send_message('remove_master_key', Remove_master_key_args, :key_seq => key_seq)
+    end
+
+    def recv_remove_master_key()
+      result = receive_message(Remove_master_key_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'remove_master_key failed: unknown result')
+    end
+
+    def get_master_keys()
+      send_get_master_keys()
+      return recv_get_master_keys()
+    end
+
+    def send_get_master_keys()
+      send_message('get_master_keys', Get_master_keys_args)
+    end
+
+    def recv_get_master_keys()
+      result = receive_message(Get_master_keys_result)
+      return result.success unless result.success.nil?
+      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_master_keys failed: unknown result')
+    end
+
     def get_open_txns()
       send_get_open_txns()
       return recv_get_open_txns()
@@ -3744,6 +3866,72 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'cancel_delegation_token', seqid)
     end
 
+    def process_add_token(seqid, iprot, oprot)
+      args = read_args(iprot, Add_token_args)
+      result = Add_token_result.new()
+      result.success = @handler.add_token(args.token_identifier, args.delegation_token)
+      write_result(result, oprot, 'add_token', seqid)
+    end
+
+    def process_remove_token(seqid, iprot, oprot)
+      args = read_args(iprot, Remove_token_args)
+      result = Remove_token_result.new()
+      result.success = @handler.remove_token(args.token_identifier)
+      write_result(result, oprot, 'remove_token', seqid)
+    end
+
+    def process_get_token(seqid, iprot, oprot)
+      args = read_args(iprot, Get_token_args)
+      result = Get_token_result.new()
+      result.success = @handler.get_token(args.token_identifier)
+      write_result(result, oprot, 'get_token', seqid)
+    end
+
+    def process_get_all_token_identifiers(seqid, iprot, oprot)
+      args = read_args(iprot, Get_all_token_identifiers_args)
+      result = Get_all_token_identifiers_result.new()
+      result.success = @handler.get_all_token_identifiers()
+      write_result(result, oprot, 'get_all_token_identifiers', seqid)
+    end
+
+    def process_add_master_key(seqid, iprot, oprot)
+      args = read_args(iprot, Add_master_key_args)
+      result = Add_master_key_result.new()
+      begin
+        result.success = @handler.add_master_key(args.key)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'add_master_key', seqid)
+    end
+
+    def process_update_master_key(seqid, iprot, oprot)
+      args = read_args(iprot, Update_master_key_args)
+      result = Update_master_key_result.new()
+      begin
+        @handler.update_master_key(args.seq_number, args.key)
+      rescue ::NoSuchObjectException => o1
+        result.o1 = o1
+      rescue ::MetaException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'update_master_key', seqid)
+    end
+
+    def process_remove_master_key(seqid, iprot, oprot)
+      args = read_args(iprot, Remove_master_key_args)
+      result = Remove_master_key_result.new()
+      result.success = @handler.remove_master_key(args.key_seq)
+      write_result(result, oprot, 'remove_master_key', seqid)
+    end
+
+    def process_get_master_keys(seqid, iprot, oprot)
+      args = read_args(iprot, Get_master_keys_args)
+      result = Get_master_keys_result.new()
+      result.success = @handler.get_master_keys()
+      write_result(result, oprot, 'get_master_keys', seqid)
+    end
+
     def process_get_open_txns(seqid, iprot, oprot)
       args = read_args(iprot, Get_open_txns_args)
       result = Get_open_txns_result.new()
@@ -8296,6 +8484,268 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Add_token_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    TOKEN_IDENTIFIER = 1
+    DELEGATION_TOKEN = 2
+
+    FIELDS = {
+      TOKEN_IDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'token_identifier'},
+      DELEGATION_TOKEN => {:type => ::Thrift::Types::STRING, :name => 'delegation_token'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_token_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Remove_token_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    TOKEN_IDENTIFIER = 1
+
+    FIELDS = {
+      TOKEN_IDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'token_identifier'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Remove_token_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_token_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    TOKEN_IDENTIFIER = 1
+
+    FIELDS = {
+      TOKEN_IDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'token_identifier'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_token_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRING, :name => 'success'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_all_token_identifiers_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+
+    FIELDS = {
+
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_all_token_identifiers_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_master_key_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    KEY = 1
+
+    FIELDS = {
+      KEY => {:type => ::Thrift::Types::STRING, :name => 'key'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Add_master_key_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::I32, :name => 'success'},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Update_master_key_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SEQ_NUMBER = 1
+    KEY = 2
+
+    FIELDS = {
+      SEQ_NUMBER => {:type => ::Thrift::Types::I32, :name => 'seq_number'},
+      KEY => {:type => ::Thrift::Types::STRING, :name => 'key'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Update_master_key_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Remove_master_key_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    KEY_SEQ = 1
+
+    FIELDS = {
+      KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Remove_master_key_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_master_keys_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+
+    FIELDS = {
+
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_master_keys_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_open_txns_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
 

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 50b38fa..0e8a157 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5271,6 +5271,165 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
+    public boolean add_token(String token_identifier, String delegation_token) throws TException {
+      startFunction("add_token", ": " + token_identifier);
+      boolean ret = false;
+      Exception ex = null;
+      try {
+        ret = getMS().addToken(token_identifier, delegation_token);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("add_token", ret == true, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean remove_token(String token_identifier) throws TException {
+      startFunction("remove_token", ": " + token_identifier);
+      boolean ret = false;
+      Exception ex = null;
+      try {
+        ret = getMS().removeToken(token_identifier);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("remove_token", ret == true, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public String get_token(String token_identifier) throws TException {
+      startFunction("get_token for", ": " + token_identifier);
+      String ret = null;
+      Exception ex = null;
+      try {
+        ret = getMS().getToken(token_identifier);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("get_token", ret != null, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public List<String> get_all_token_identifiers() throws TException {
+      startFunction("get_all_token_identifiers.");
+      List<String> ret = null;
+      Exception ex = null;
+      try {
+        ret = getMS().getAllTokenIdentifiers();
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("get_all_token_identifiers.", ex == null, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public int add_master_key(String key) throws MetaException, TException {
+      startFunction("add_master_key.");
+      int ret = -1;
+      Exception ex = null;
+      try {
+        ret = getMS().addMasterKey(key);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("add_master_key.", ex == null, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public void update_master_key(int seq_number, String key) throws NoSuchObjectException,
+      MetaException, TException {
+      startFunction("update_master_key.");
+      Exception ex = null;
+      try {
+        getMS().updateMasterKey(seq_number, key);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("update_master_key.", ex == null, ex);
+      }
+    }
+
+    @Override
+    public boolean remove_master_key(int key_seq) throws TException {
+      startFunction("remove_master_key.");
+      Exception ex = null;
+      boolean ret;
+      try {
+        ret = getMS().removeMasterKey(key_seq);
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("remove_master_key.", ex == null, ex);
+      }
+      return ret;
+    }
+
+    @Override
+    public List<String> get_master_keys() throws TException {
+      startFunction("get_master_keys.");
+      Exception ex = null;
+      String[] ret = null;
+      try {
+        ret = getMS().getMasterKeys();
+      } catch (Exception e) {
+        ex = e;
+        if (e instanceof MetaException) {
+          throw (MetaException) e;
+        } else {
+          throw newMetaException(e);
+        }
+      } finally {
+        endFunction("get_master_keys.", ret != null, ex);
+      }
+      return Arrays.asList(ret);
+    }
+
+    @Override
     public void markPartitionForEvent(final String db_name, final String tbl_name,
         final Map<String, String> partName, final PartitionEventType evtType) throws
         MetaException, TException, NoSuchObjectException, UnknownDBException,

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index b5c4d1d..9048d45 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1938,6 +1938,48 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
   }
 
   @Override
+  public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+    return client.add_token(tokenIdentifier, delegationToken);
+  }
+
+  @Override
+  public boolean removeToken(String tokenIdentifier) throws TException {
+    return client.remove_token(tokenIdentifier);
+  }
+
+  @Override
+  public String getToken(String tokenIdentifier) throws TException {
+    return client.get_token(tokenIdentifier);
+  }
+
+  @Override
+  public List<String> getAllTokenIdentifiers() throws TException {
+    return client.get_all_token_identifiers();
+  }
+
+  @Override
+  public int addMasterKey(String key) throws MetaException, TException {
+    return client.add_master_key(key);
+  }
+
+  @Override
+  public void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException, TException {
+    client.update_master_key(seqNo, key);
+  }
+
+  @Override
+  public boolean removeMasterKey(Integer keySeq) throws TException {
+    return client.remove_master_key(keySeq);
+  }
+
+  @Override
+  public String[] getMasterKeys() throws TException {
+    List<String> keyList = client.get_master_keys();
+    return keyList.toArray(new String[keyList.size()]);
+  }
+
+  @Override
   public ValidTxnList getValidTxns() throws TException {
     return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index cb092d1..62677d1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1165,7 +1165,24 @@ public interface IMetaStoreClient {
    */
   void cancelDelegationToken(String tokenStrForm) throws MetaException, TException;
 
-  public String getTokenStrForm() throws IOException;
+  String getTokenStrForm() throws IOException;
+
+  boolean addToken(String tokenIdentifier, String delegationToken) throws TException;
+
+  boolean removeToken(String tokenIdentifier) throws TException;
+
+  String getToken(String tokenIdentifier) throws TException;
+
+  List<String> getAllTokenIdentifiers() throws TException;
+
+  int addMasterKey(String key) throws MetaException, TException;
+
+  void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException, TException;
+
+  boolean removeMasterKey(Integer keySeq) throws TException;
+
+  String[] getMasterKeys() throws TException;
 
   void createFunction(Function func)
       throws InvalidObjectException, MetaException, TException;
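
Taken together, the IMetaStoreClient additions above expose the metastore-backed token store to callers. Below is a minimal usage sketch, illustrative only and not part of this patch; it assumes a reachable metastore configured via HiveConf on the classpath.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class TokenStoreSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    // Store, read back, enumerate, and remove a delegation token entry.
    client.addToken("sample-identifier", "sample-token-string");
    String token = client.getToken("sample-identifier");
    System.out.println(token + " / " + client.getAllTokenIdentifiers());
    client.removeToken("sample-identifier");
    client.close();
  }
}

Each call maps one-to-one onto the Thrift methods (add_token, get_token, get_all_token_identifiers, remove_token) added earlier in this patch.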

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote b/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
index 3d5e3a4..6992f62 100644
--- a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
+++ b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
@@ -36,9 +36,9 @@ import javax.security.sasl.Sasl;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.DBTokenStore;
@@ -48,7 +48,6 @@ import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hive.service.ServiceException;
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.thrift.ThriftCLIService;
 import org.apache.thrift.TProcessorFactory;
@@ -105,7 +104,7 @@ public class HiveAuthFactory {
     transportMode = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
     authTypeStr = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION);
 
-    // ShimLoader.getHadoopShims().isSecurityEnabled() will only check that·
+    // ShimLoader.getHadoopShims().isSecurityEnabled() will only check that
     // hadoopAuth is not simple, it does not guarantee it is kerberos
     hadoopAuth = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
 
@@ -127,18 +126,26 @@ public class HiveAuthFactory {
       // Start delegation token manager
       delegationTokenManager = new HiveDelegationTokenManager();
       try {
-        // baseHandler is only necessary for DBTokenStore
-        HMSHandler baseHandler = null;
-        String tokenStoreClass =
-            conf.getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS);
+        Object baseHandler = null;
+        String tokenStoreClass = conf.getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS);
+
         if (tokenStoreClass.equals(DBTokenStore.class.getName())) {
-          baseHandler = new HiveMetaStore.HMSHandler("New db based metastore server", conf, true);
+          // An IMetaStoreClient is needed to access the token store when DBTokenStore is used.
+          // It is obtained via Hive.get(conf).getMSC() in whichever thread calls the
+          // DelegationTokenStore. To avoid the cyclic reference, we pass the Hive class itself
+          // to DBTokenStore, which uses Java reflection on it to get a thread-local Hive
+          // object with a synchronized MetaStoreClient.
+          // Note: two MSCs stay open for the lifetime of HS2: one held by the HS2 thread-local
+          // Hive object, the other by a daemon thread spawned in DelegationTokenSecretManager
+          // to remove expired tokens.
+          baseHandler = Hive.class;
         }
-        delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler,
-            ServerMode.HIVESERVER2);
+
+        delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, ServerMode.HIVESERVER2);
         saslServer.setSecretManager(delegationTokenManager.getSecretManager());
-      } catch (MetaException | IOException e) {
-        throw new ServiceException("Failed to start token manager", e);
+      }
+      catch (IOException e) {
+        throw new TTransportException("Failed to start token manager", e);
       }
     }
   }
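
The comment block above is the heart of this change. As a hedged reconstruction of what it describes (only Hive.get(conf) and getMSC() come from the comment; the helper below is hypothetical, not code from this commit), the reflective lookup DBTokenStore performs would look roughly like:

import java.lang.reflect.Method;

import org.apache.hadoop.hive.conf.HiveConf;

final class ReflectiveMscSketch {
  // Resolve the thread-local Hive object and its synchronized MetaStoreClient
  // from the Hive class handle, so no compile-time dependency on the ql module
  // is needed at the call site.
  static Object metaStoreClient(Class<?> hiveClass, HiveConf conf) throws Exception {
    Method get = hiveClass.getMethod("get", HiveConf.class); // static Hive.get(conf)
    Object hive = get.invoke(null, conf);                    // thread-local instance
    return hiveClass.getMethod("getMSC").invoke(hive);       // synchronized MSC
  }
}

Passing the class rather than a constructed handler is what avoids the cyclic reference the comment mentions.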


[31/51] [abbrv] hive git commit: HIVE-13218 : LLAP: better configs part 1 (Sergey Shelukhin, reviewed by Gopal V)

Posted by jd...@apache.org.
HIVE-13218 : LLAP: better configs part 1 (Sergey Shelukhin, reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/428a930c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/428a930c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/428a930c

Branch: refs/heads/llap
Commit: 428a930c6581b734b65ae3710d1a7585bd95b5da
Parents: ceff062
Author: Sergey Shelukhin <se...@apache.org>
Authored: Fri Mar 11 19:09:21 2016 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Fri Mar 11 19:09:21 2016 -0800

----------------------------------------------------------------------
 .../hive/llap/cli/LlapOptionsProcessor.java     |  22 +++-
 .../hadoop/hive/llap/cli/LlapServiceDriver.java |   6 ++
 .../hive/ql/optimizer/physical/LlapDecider.java |  11 +-
 .../llap/bucket_map_join_tez1.q.out             |  10 +-
 .../llap/bucket_map_join_tez2.q.out             |   4 +-
 .../llap/dynamic_partition_pruning.q.out        | 102 +++++++++----------
 .../llap/dynamic_partition_pruning_2.q.out      |   8 +-
 .../llap/hybridgrace_hashjoin_1.q.out           |  20 ++--
 .../llap/hybridgrace_hashjoin_2.q.out           |  24 ++---
 .../clientpositive/llap/llap_nullscan.q.out     |   4 +-
 .../clientpositive/llap/llapdecider.q.out       |   2 +-
 .../clientpositive/llap/mapjoin_decimal.q.out   |   2 +-
 .../test/results/clientpositive/llap/mrr.q.out  |  10 +-
 .../llap/tez_bmj_schema_evolution.q.out         |   2 +-
 .../results/clientpositive/llap/tez_dml.q.out   |   2 +-
 .../llap/tez_dynpart_hashjoin_1.q.out           |  12 +--
 .../llap/tez_dynpart_hashjoin_2.q.out           |   6 +-
 .../clientpositive/llap/tez_join_hash.q.out     |   2 +-
 .../clientpositive/llap/tez_join_tests.q.out    |   2 +-
 .../clientpositive/llap/tez_joins_explain.q.out |   2 +-
 .../results/clientpositive/llap/tez_smb_1.q.out |   8 +-
 .../clientpositive/llap/tez_smb_main.q.out      |  20 ++--
 .../results/clientpositive/llap/tez_union.q.out |   4 +-
 .../clientpositive/llap/tez_union2.q.out        |   4 +-
 .../llap/tez_union_multiinsert.q.out            |  28 ++---
 .../llap/tez_vector_dynpart_hashjoin_1.q.out    |  12 +--
 .../llap/tez_vector_dynpart_hashjoin_2.q.out    |   6 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  96 ++++++++---------
 .../clientpositive/tez/llapdecider.q.out        |   2 +-
 29 files changed, 227 insertions(+), 206 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
index c43bf97..c292b37 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapOptionsProcessor.java
@@ -52,12 +52,14 @@ public class LlapOptionsProcessor {
   public static final String OPTION_HIVECONF = "hiveconf"; // llap-daemon-site if relevant parameter
   public static final String OPTION_SLIDER_AM_CONTAINER_MB = "slider-am-container-mb"; // forward as arg
   public static final String OPTION_LLAP_QUEUE = "queue"; // forward via config.json
+  public static final String OPTION_IO_THREADS = "iothreads"; // llap-daemon-site
 
   public class LlapOptions {
     private final int instances;
     private final String directory;
     private final String name;
     private final int executors;
+    private final int ioThreads;
     private final long cache;
     private final long size;
     private final long xmx;
@@ -67,8 +69,8 @@ public class LlapOptionsProcessor {
     private final String javaPath;
     private final String llapQueueName;
 
-    public LlapOptions(String name, int instances, String directory, int executors, long cache,
-                       long size, long xmx, String jars, boolean isHbase,
+    public LlapOptions(String name, int instances, String directory, int executors, int ioThreads,
+                       long cache, long size, long xmx, String jars, boolean isHbase,
                        @Nonnull Properties hiveconf, String javaPath, String llapQueueName)
         throws ParseException {
       if (instances <= 0) {
@@ -79,6 +81,7 @@ public class LlapOptionsProcessor {
       this.directory = directory;
       this.name = name;
       this.executors = executors;
+      this.ioThreads = ioThreads;
       this.cache = cache;
       this.size = size;
       this.xmx = xmx;
@@ -105,6 +108,10 @@ public class LlapOptionsProcessor {
       return executors;
     }
 
+    public int getIoThreads() {
+      return ioThreads;
+    }
+
     public long getCache() {
       return cache;
     }
@@ -202,6 +209,9 @@ public class LlapOptionsProcessor {
         .withLongOpt(OPTION_SLIDER_AM_CONTAINER_MB)
         .withDescription("The size of the slider AppMaster container in MB").create());
 
+    options.addOption(OptionBuilder.hasArg().withArgName(OPTION_IO_THREADS)
+        .withLongOpt(OPTION_IO_THREADS).withDescription("Number of IO threads per instance").create('t'));
+
     // [-H|--help]
     options.addOption(new Option("H", "help", false, "Print help information"));
   }
@@ -225,6 +235,9 @@ public class LlapOptionsProcessor {
     String name = commandLine.getOptionValue(OPTION_NAME, null);
 
     final int executors = Integer.parseInt(commandLine.getOptionValue(OPTION_EXECUTORS, "-1"));
+    // Default the IO thread count to the executor count when not set explicitly.
+    final int ioThreads = Integer.parseInt(
+        commandLine.getOptionValue(OPTION_IO_THREADS, Integer.toString(executors)));
     final long cache = parseSuffixed(commandLine.getOptionValue(OPTION_CACHE, "-1"));
     final long size = parseSuffixed(commandLine.getOptionValue(OPTION_SIZE, "-1"));
     final long xmx = parseSuffixed(commandLine.getOptionValue(OPTION_XMX, "-1"));
@@ -248,9 +261,8 @@ public class LlapOptionsProcessor {
 
     // loglevel, chaosmonkey & args are parsed by the python processor
 
-    return new LlapOptions(
-        name, instances, directory, executors, cache, size, xmx, jars, isHbase, hiveconf, javaHome,
-        queueName);
+    return new LlapOptions(name, instances, directory, executors, ioThreads, cache,
+        size, xmx, jars, isHbase, hiveconf, javaHome, queueName);
   }
 
   private void printUsage() {
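
One detail worth noting in the option parsing above: when -t/--iothreads is absent, the value falls back to the parsed executor count. As a stand-alone illustration (the helper name is hypothetical; the commons-cli call is the same one used in the diff):

import org.apache.commons.cli.CommandLine;

final class IoThreadsDefaultSketch {
  // getOptionValue(opt, default) yields the default when the flag was not passed,
  // so the IO thread count tracks --executors unless overridden with -t.
  static int resolveIoThreads(CommandLine commandLine, int executors) {
    return Integer.parseInt(
        commandLine.getOptionValue("iothreads", Integer.toString(executors)));
  }
}

Since executors itself defaults to -1, an unset -t leaves ioThreads at -1 as well, which LlapServiceDriver treats as "not configured" (see the getIoThreads() != -1 check in the next file).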

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
index f6ca79d..508ce27 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/LlapServiceDriver.java
@@ -208,6 +208,12 @@ public class LlapServiceDriver {
       // TODO: vcpu settings - possibly when DRFA works right
     }
 
+    if (options.getIoThreads() != -1) {
+      conf.setLong(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname, options.getIoThreads());
+      propsDirectOptions.setProperty(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname,
+          String.valueOf(options.getIoThreads()));
+    }
+
     if (options.getCache() != -1) {
       conf.set(HiveConf.ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname,
           Long.toString(options.getCache()));

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
index 80dbd31..194828f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java
@@ -102,11 +102,14 @@ public class LlapDecider implements PhysicalPlanResolver {
     private final HiveConf conf;
     private final boolean doSkipUdfCheck;
     private final boolean arePermanentFnsAllowed;
+    private final boolean shouldUber;
 
-    public LlapDecisionDispatcher(PhysicalContext pctx) {
+    public LlapDecisionDispatcher(PhysicalContext pctx, LlapMode mode) {
       conf = pctx.getConf();
       doSkipUdfCheck = HiveConf.getBoolVar(conf, ConfVars.LLAP_SKIP_COMPILE_UDF_CHECK);
       arePermanentFnsAllowed = HiveConf.getBoolVar(conf, ConfVars.LLAP_DAEMON_ALLOW_PERMANENT_FNS);
+      // Don't use uber in "all" mode - everything can go into LLAP, which is better than uber.
+      shouldUber = HiveConf.getBoolVar(conf, ConfVars.LLAP_AUTO_ALLOW_UBER) && (mode != all);
     }
 
     @Override
@@ -133,7 +136,7 @@ public class LlapDecider implements PhysicalPlanResolver {
     private void convertWork(TezWork tezWork, BaseWork work)
       throws SemanticException {
 
-      if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_AUTO_ALLOW_UBER)) {
+      if (shouldUber) {
         // let's see if we can go one step further and just uber this puppy
         if (tezWork.getChildren(work).isEmpty()
             && work instanceof ReduceWork
@@ -420,7 +423,7 @@ public class LlapDecider implements PhysicalPlanResolver {
     this.conf = pctx.getConf();
 
     this.mode = LlapMode.valueOf(HiveConf.getVar(conf, HiveConf.ConfVars.LLAP_EXECUTION_MODE));
-    LOG.info("llap mode: "+this.mode);
+    LOG.info("llap mode: " + this.mode);
 
     if (mode == none) {
       LOG.info("LLAP disabled.");
@@ -428,7 +431,7 @@ public class LlapDecider implements PhysicalPlanResolver {
     }
 
     // create dispatcher and graph walker
-    Dispatcher disp = new LlapDecisionDispatcher(pctx);
+    Dispatcher disp = new LlapDecisionDispatcher(pctx, mode);
     TaskGraphWalker ogw = new TaskGraphWalker(disp);
 
     // get all the tasks nodes from root task

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
index 8f054f2..f5a036e 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out
@@ -281,7 +281,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -536,7 +536,7 @@ STAGE PLANS:
                         value expressions: _col1 (type: string)
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
@@ -678,7 +678,7 @@ STAGE PLANS:
                         value expressions: _col1 (type: string)
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: sum(VALUE._col0)
@@ -1602,7 +1602,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count()
@@ -1719,7 +1719,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 11 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count()

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
index 7d59781..2800ee5 100644
--- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
+++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez2.q.out
@@ -593,7 +593,7 @@ STAGE PLANS:
                         Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: int)
@@ -680,7 +680,7 @@ STAGE PLANS:
                         value expressions: _col1 (type: string)
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
index ace2960..05f05aa 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
                         Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -286,7 +286,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -398,7 +398,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -540,7 +540,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -652,7 +652,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -834,7 +834,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -989,7 +989,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1147,7 +1147,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1259,7 +1259,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1399,7 +1399,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1511,7 +1511,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1649,7 +1649,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1776,7 +1776,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1888,7 +1888,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2000,7 +2000,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2140,7 +2140,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2266,7 +2266,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2425,7 +2425,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2569,7 +2569,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2698,7 +2698,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2805,7 +2805,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2914,7 +2914,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3062,7 +3062,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3216,7 +3216,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3347,7 +3347,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3362,7 +3362,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3398,7 +3398,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 8 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3554,7 +3554,7 @@ STAGE PLANS:
                     Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3569,7 +3569,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3605,7 +3605,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 8 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3760,7 +3760,7 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
         Reducer 11 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3811,7 +3811,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 5
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3842,7 +3842,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3855,7 +3855,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reducer 8 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -4020,7 +4020,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4157,7 +4157,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4327,7 +4327,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4480,7 +4480,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4615,7 +4615,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4726,7 +4726,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4848,7 +4848,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4969,7 +4969,7 @@ STAGE PLANS:
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5134,7 +5134,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5221,7 +5221,7 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5310,7 +5310,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5446,7 +5446,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5589,7 +5589,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5710,7 +5710,7 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -5725,7 +5725,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -5761,7 +5761,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 7 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -5956,7 +5956,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
index 13b85d9..67c9aa5 100644
--- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning_2.q.out
@@ -249,7 +249,7 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
@@ -406,7 +406,7 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
@@ -790,7 +790,7 @@ STAGE PLANS:
                   Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
@@ -1102,7 +1102,7 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
index d611e8b..aaa8425 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out
@@ -100,7 +100,7 @@ STAGE PLANS:
                         Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -224,7 +224,7 @@ STAGE PLANS:
                         Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -343,7 +343,7 @@ STAGE PLANS:
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -463,7 +463,7 @@ STAGE PLANS:
                         Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -574,7 +574,7 @@ STAGE PLANS:
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -686,7 +686,7 @@ STAGE PLANS:
                       Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -837,7 +837,7 @@ STAGE PLANS:
                         Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -959,7 +959,7 @@ STAGE PLANS:
                         Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1080,7 +1080,7 @@ STAGE PLANS:
                         Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1202,7 +1202,7 @@ STAGE PLANS:
                         Statistics: Num rows: 525 Data size: 12474 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
index 3d5f5cc..7a00162 100644
--- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out
@@ -104,7 +104,7 @@ STAGE PLANS:
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -233,7 +233,7 @@ STAGE PLANS:
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -391,7 +391,7 @@ STAGE PLANS:
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -541,7 +541,7 @@ STAGE PLANS:
                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -751,7 +751,7 @@ STAGE PLANS:
                           value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -769,7 +769,7 @@ STAGE PLANS:
                     Map-reduce partition columns: _col0 (type: bigint)
                     Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: bigint)
@@ -784,7 +784,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 9 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -997,7 +997,7 @@ STAGE PLANS:
                           value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1015,7 +1015,7 @@ STAGE PLANS:
                     Map-reduce partition columns: _col0 (type: bigint)
                     Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: bigint)
@@ -1030,7 +1030,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 9 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1235,7 +1235,7 @@ STAGE PLANS:
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1427,7 +1427,7 @@ STAGE PLANS:
                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
index 947ff71..e17d721 100644
--- a/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/llap/llap_nullscan.q.out
@@ -323,7 +323,7 @@ STAGE PLANS:
             Execution mode: vectorized, llap
             LLAP IO: all inputs
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -338,7 +338,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/llapdecider.q.out b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
index 2b0e639..db3bf22 100644
--- a/ql/src/test/results/clientpositive/llap/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/llap/llapdecider.q.out
@@ -1008,7 +1008,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
index 1a58629..550a9be 100644
--- a/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/llap/mapjoin_decimal.q.out
@@ -143,7 +143,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1049 Data size: 117488 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: decimal(4,2)), VALUE._col0 (type: decimal(4,0))

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/mrr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/mrr.q.out b/ql/src/test/results/clientpositive/llap/mrr.q.out
index 3098935..72e27ab 100644
--- a/ql/src/test/results/clientpositive/llap/mrr.q.out
+++ b/ql/src/test/results/clientpositive/llap/mrr.q.out
@@ -58,7 +58,7 @@ STAGE PLANS:
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)
@@ -492,7 +492,7 @@ STAGE PLANS:
                     sort order: ++
                     Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
@@ -921,7 +921,7 @@ STAGE PLANS:
                     sort order: ++
                     Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
@@ -1451,7 +1451,7 @@ STAGE PLANS:
                     Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint)
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), KEY.reducesinkkey0 (type: string), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint)
@@ -1764,7 +1764,7 @@ STAGE PLANS:
                     Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: string), VALUE._col2 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_bmj_schema_evolution.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_bmj_schema_evolution.q.out b/ql/src/test/results/clientpositive/llap/tez_bmj_schema_evolution.q.out
index 6ef6f03..330eada 100644
--- a/ql/src/test/results/clientpositive/llap/tez_bmj_schema_evolution.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_bmj_schema_evolution.q.out
@@ -131,7 +131,7 @@ STAGE PLANS:
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_dml.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dml.q.out b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
index 1487c90..de94d5b 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dml.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dml.q.out
@@ -61,7 +61,7 @@ STAGE PLANS:
                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: string)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
index 5de12b4..761fdac 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_1.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
                   Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
@@ -215,7 +215,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -360,7 +360,7 @@ STAGE PLANS:
                   Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: bigint)
@@ -494,7 +494,7 @@ STAGE PLANS:
                   Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
@@ -628,7 +628,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -776,7 +776,7 @@ STAGE PLANS:
                   Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out
index 50e1a4f..d302446 100644
--- a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_2.q.out
@@ -113,7 +113,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)
@@ -305,7 +305,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)
@@ -495,7 +495,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
index 0bcaa2c..78d43b8 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_hash.q.out
@@ -95,7 +95,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out b/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
index 95f2d9a..cb3f75a 100644
--- a/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_join_tests.q.out
@@ -116,7 +116,7 @@ STAGE PLANS:
                   Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out b/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
index 6765f64..0c1e695 100644
--- a/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_joins_explain.q.out
@@ -114,7 +114,7 @@ STAGE PLANS:
                   Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string)
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_smb_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_smb_1.q.out b/ql/src/test/results/clientpositive/llap/tez_smb_1.q.out
index 570118c..9acdeaf 100644
--- a/ql/src/test/results/clientpositive/llap/tez_smb_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_smb_1.q.out
@@ -167,7 +167,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -273,7 +273,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -428,7 +428,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -568,7 +568,7 @@ STAGE PLANS:
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_smb_main.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_smb_main.q.out b/ql/src/test/results/clientpositive/llap/tez_smb_main.q.out
index 7f33bb3..239cee5 100644
--- a/ql/src/test/results/clientpositive/llap/tez_smb_main.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_smb_main.q.out
@@ -308,7 +308,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -418,7 +418,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -528,7 +528,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -653,7 +653,7 @@ STAGE PLANS:
                         Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -764,7 +764,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -920,7 +920,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1011,7 +1011,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1145,7 +1145,7 @@ STAGE PLANS:
                         Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1326,7 +1326,7 @@ STAGE PLANS:
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1454,7 +1454,7 @@ STAGE PLANS:
                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union.q.out b/ql/src/test/results/clientpositive/llap/tez_union.q.out
index 4388bfc..761d2c8 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union.q.out
@@ -269,7 +269,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -780,7 +780,7 @@ STAGE PLANS:
                         Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_union2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union2.q.out b/ql/src/test/results/clientpositive/llap/tez_union2.q.out
index d08a102..85cda19 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union2.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union2.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
@@ -124,7 +124,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
index 9145a63..57f0e37 100644
--- a/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_union_multiinsert.q.out
@@ -150,7 +150,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -188,7 +188,7 @@ STAGE PLANS:
                         Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                         Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0)
@@ -205,7 +205,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest1
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -1054,7 +1054,7 @@ STAGE PLANS:
                           Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0)
@@ -1071,7 +1071,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest1
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -1088,7 +1088,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest2
         Reducer 6 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1950,7 +1950,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0)
@@ -1967,7 +1967,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest1
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -1984,7 +1984,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest2
         Reducer 6 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2801,7 +2801,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2835,7 +2835,7 @@ STAGE PLANS:
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                       Statistics: Num rows: 1 Data size: 464 Basic stats: COMPLETE Column stats: PARTIAL
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0)
@@ -2852,7 +2852,7 @@ STAGE PLANS:
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                       name: default.dest1
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -3634,7 +3634,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 272 Basic stats: COMPLETE Column stats: PARTIAL
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3693,7 +3693,7 @@ STAGE PLANS:
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                           name: default.dest2
         Reducer 5 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(DISTINCT KEY._col1:0._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
index 17e2217..6b5e483 100644
--- a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_1.q.out
@@ -84,7 +84,7 @@ STAGE PLANS:
                   Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
@@ -215,7 +215,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -360,7 +360,7 @@ STAGE PLANS:
                   Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint)
         Reducer 4 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: bigint)
@@ -494,7 +494,7 @@ STAGE PLANS:
                   Statistics: Num rows: 6758 Data size: 1453080 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean), _col12 (type: tinyint), _col13 (type: smallint), _col14 (type: int), _col15 (type: bigint), _col16 (type: float), _col17 (type: double), _col18 (type: string), _col19 (type: string), _col20 (type: timestamp), _col21 (type: timestamp), _col22 (type: boolean), _col23 (type: boolean)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float), VALUE._col4 (type: double), VALUE._col5 (type: string), VALUE._col6 (type: string), VALUE._col7 (type: timestamp), VALUE._col8 (type: timestamp), VALUE._col9 (type: boolean), VALUE._col10 (type: boolean), VALUE._col11 (type: tinyint), VALUE._col12 (type: smallint), VALUE._col13 (type: int), VALUE._col14 (type: bigint), VALUE._col15 (type: float), VALUE._col16 (type: double), VALUE._col17 (type: string), VALUE._col18 (type: string), VALUE._col19 (type: timestamp), VALUE._col20 (type: timestamp), VALUE._col21 (type: boolean), VALUE._col22 (type: boolean)
@@ -628,7 +628,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -776,7 +776,7 @@ STAGE PLANS:
                   Statistics: Num rows: 3379 Data size: 726540 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: smallint)
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: smallint), KEY.reducesinkkey0 (type: bigint)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out
index 2e8bb11..43e0404 100644
--- a/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_vector_dynpart_hashjoin_2.q.out
@@ -113,7 +113,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)
@@ -305,7 +305,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)
@@ -495,7 +495,7 @@ STAGE PLANS:
                   Statistics: Num rows: 9011 Data size: 1937438 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col3 (type: bigint), _col4 (type: float), _col5 (type: double), _col6 (type: string), _col7 (type: string), _col8 (type: timestamp), _col9 (type: timestamp), _col10 (type: boolean), _col11 (type: boolean)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Select Operator
                 expressions: KEY.reducesinkkey1 (type: tinyint), KEY.reducesinkkey0 (type: smallint), KEY.reducesinkkey2 (type: int), VALUE._col0 (type: bigint), VALUE._col1 (type: float), VALUE._col2 (type: double), VALUE._col3 (type: string), VALUE._col4 (type: string), VALUE._col5 (type: timestamp), VALUE._col6 (type: timestamp), VALUE._col7 (type: boolean), VALUE._col8 (type: boolean)


[37/51] [abbrv] hive git commit: HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)

Posted by jd...@apache.org.
HIVE-13201 : Compaction shouldn't be allowed on non-ACID table (Wei Zheng, reviewed by Alan Gates)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6af0124
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6af0124
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6af0124

Branch: refs/heads/llap
Commit: b6af0124b351ba759a15c81f8ececd7920115b2f
Parents: e7a1756
Author: Wei Zheng <we...@apache.org>
Authored: Mon Mar 14 14:34:28 2016 -0700
Committer: Wei Zheng <we...@apache.org>
Committed: Mon Mar 14 14:34:28 2016 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java      |  1 +
 ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java  |  4 ++++
 .../test/queries/clientnegative/compact_non_acid_table.q | 11 +++++++++++
 ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q   |  2 +-
 ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q   |  2 +-
 .../results/clientnegative/compact_non_acid_table.q.out  | 11 +++++++++++
 .../test/results/clientpositive/dbtxnmgr_compact1.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact2.q.out  |  4 ++--
 .../test/results/clientpositive/dbtxnmgr_compact3.q.out  |  4 ++--
 10 files changed, 36 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index f0cc3a2..f091f67 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -397,6 +397,7 @@ public enum ErrorMsg {
   TOO_MANY_COMPACTION_PARTITIONS(10284, "Compaction can only be requested on one partition at a " +
       "time."),
   DISTINCT_NOT_SUPPORTED(10285, "Distinct keyword is not supported in the current context"),
+  NONACID_COMPACTION_NOT_SUPPORTED(10286, "Compaction is not allowed on non-ACID table {0}.{1}", true),
 
   UPDATEDELETE_PARSE_ERROR(10290, "Encountered parse error while parsing rewritten update or " +
       "delete query"),

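A quick standalone sketch of how the new template expands, assuming the trailing
boolean in the enum constructor marks the message as a java.text.MessageFormat
pattern (as the {0}.{1} placeholders suggest); the class name and arguments below
are made up:

    import java.text.MessageFormat;

    public class ErrorMsgFormatDemo {
      public static void main(String[] args) {
        // {0} and {1} are filled with the database and table name at raise time.
        String template = "Compaction is not allowed on non-ACID table {0}.{1}";
        System.out.println(MessageFormat.format(template, "default", "not_an_acid_table"));
        // Prints: Compaction is not allowed on non-ACID table default.not_an_acid_table
      }
    }
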
http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 2a64cfa..56eecf6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -1745,6 +1745,10 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
   private int compact(Hive db, AlterTableSimpleDesc desc) throws HiveException {
 
     Table tbl = db.getTable(desc.getTableName());
+    if (!AcidUtils.isAcidTable(tbl)) {
+      throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, tbl.getDbName(),
+          tbl.getTableName());
+    }
 
     String partName = null;
     if (desc.getPartSpec() == null) {
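
The shape of the guard as a self-contained sketch (Table is a hypothetical stand-in
record, and the transactional-property check approximates AcidUtils.isAcidTable,
which keys off the table's 'transactional' property):

    import java.util.Map;

    final class CompactionGuard {
      // Hypothetical stand-in for org.apache.hadoop.hive.ql.metadata.Table.
      record Table(String dbName, String tableName, Map<String, String> params) {}

      // Approximation of AcidUtils.isAcidTable(): ACID tables carry the
      // table property 'transactional'='true'.
      static boolean isAcidTable(Table t) {
        return "true".equalsIgnoreCase(t.params().get("transactional"));
      }

      static void checkCompactAllowed(Table t) {
        if (!isAcidTable(t)) {
          throw new IllegalStateException("Compaction is not allowed on non-ACID table "
              + t.dbName() + "." + t.tableName());
        }
      }
    }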

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientnegative/compact_non_acid_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/compact_non_acid_table.q b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
new file mode 100644
index 0000000..e9faa24
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/compact_non_acid_table.q
@@ -0,0 +1,11 @@
+set hive.mapred.mode=nonstrict;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
+
+
+create table not_an_acid_table (a int, b varchar(128));
+
+alter table not_an_acid_table compact 'major';
+
+drop table not_an_acid_table;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
index 7f71305..b86c6f9 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact1.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true');
 
 alter table T1 compact 'major';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
index 4759d65..dca954e 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact2.q
@@ -1,7 +1,7 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 
-create table T1(key string, val string) partitioned by (ds string) stored as textfile;
+create table T1(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true');
 
 alter table T1 add partition (ds = 'today');
 alter table T1 add partition (ds = 'yesterday');

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q b/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
index 23b3959..8c7bc25 100644
--- a/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
+++ b/ql/src/test/queries/clientpositive/dbtxnmgr_compact3.q
@@ -5,7 +5,7 @@ create database D1;
 
 use D1;
 
-create table T1(key string, val string) stored as textfile;
+create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true');
 
 alter table T1 compact 'major';
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/compact_non_acid_table.q.out b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
new file mode 100644
index 0000000..eab9e19
--- /dev/null
+++ b/ql/src/test/results/clientnegative/compact_non_acid_table.q.out
@@ -0,0 +1,11 @@
+PREHOOK: query: create table not_an_acid_table (a int, b varchar(128))
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@not_an_acid_table
+POSTHOOK: query: create table not_an_acid_table (a int, b varchar(128))
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@not_an_acid_table
+PREHOOK: query: alter table not_an_acid_table compact 'major'
+PREHOOK: type: ALTERTABLE_COMPACT
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Compaction is not allowed on non-ACID table default.not_an_acid_table

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out
index 46216f9..0dad32d 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact1.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@T1
-POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T1

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out
index 40280a9..2114575 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact2.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile
+PREHOOK: query: create table T1(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@T1
-POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) stored as textfile
+POSTHOOK: query: create table T1(key string, val string) partitioned by (ds string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T1

http://git-wip-us.apache.org/repos/asf/hive/blob/b6af0124/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out
index 07e4e6d..1fa090f 100644
--- a/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out
+++ b/ql/src/test/results/clientpositive/dbtxnmgr_compact3.q.out
@@ -10,11 +10,11 @@ PREHOOK: Input: database:d1
 POSTHOOK: query: use D1
 POSTHOOK: type: SWITCHDATABASE
 POSTHOOK: Input: database:d1
-PREHOOK: query: create table T1(key string, val string) stored as textfile
+PREHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: D1@T1
 PREHOOK: Output: database:d1
-POSTHOOK: query: create table T1(key string, val string) stored as textfile
+POSTHOOK: query: create table T1(key string, val string) clustered by (val) into 2 buckets stored as ORC TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: D1@T1
 POSTHOOK: Output: database:d1


[23/51] [abbrv] hive git commit: HIVE-13144 : HS2 can leak ZK ACL objects when curator retries to create the persistent ephemeral node (Vaibhav Gumashta via Thejas Nair)

Posted by jd...@apache.org.
HIVE-13144 : HS2 can leak ZK ACL objects when curator retries to create the persistent ephemeral node (Vaibhav Gumashta via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1e8a31e8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1e8a31e8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1e8a31e8

Branch: refs/heads/llap
Commit: 1e8a31e8fc6aa4ef919b6089e35cc278e66a3b98
Parents: 0da77af
Author: Vaibhav Gumashta <vg...@hortonworks.com>
Authored: Thu Mar 10 10:17:54 2016 -0800
Committer: Thejas Nair <th...@hortonworks.com>
Committed: Thu Mar 10 10:17:54 2016 -0800

----------------------------------------------------------------------
 service/src/java/org/apache/hive/service/server/HiveServer2.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1e8a31e8/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 892a476..ab834b9 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -220,10 +220,10 @@ public class HiveServer2 extends CompositeService {
    * ACLProvider for providing appropriate ACLs to CuratorFrameworkFactory
    */
   private final ACLProvider zooKeeperAclProvider = new ACLProvider() {
-    List<ACL> nodeAcls = new ArrayList<ACL>();
 
     @Override
     public List<ACL> getDefaultAcl() {
+      List<ACL> nodeAcls = new ArrayList<ACL>();
       if (UserGroupInformation.isSecurityEnabled()) {
         // Read all to the world
         nodeAcls.addAll(Ids.READ_ACL_UNSAFE);
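
The one-line move matters because curator can call getDefaultAcl() once per retry
while creating the persistent ephemeral node; with the list held in a field, each
retry appends the same entries again, so the list (and the ACLs set on the znode)
keeps growing. A self-contained sketch of the failure mode, with plain strings
standing in for ZooKeeper ACL objects:

    import java.util.ArrayList;
    import java.util.List;

    public class AclLeakDemo {
      // Buggy pattern: the list lives in a field, so entries accumulate across calls.
      static final List<String> sharedAcls = new ArrayList<>();

      static List<String> buggyDefaultAcl() {
        sharedAcls.add("world:anyone:read"); // appended again on every curator retry
        return sharedAcls;
      }

      // Fixed pattern (what the patch does): build a fresh list per call.
      static List<String> fixedDefaultAcl() {
        List<String> acls = new ArrayList<>();
        acls.add("world:anyone:read");
        return acls;
      }

      public static void main(String[] args) {
        for (int retry = 0; retry < 3; retry++) buggyDefaultAcl();
        System.out.println("buggy list after 3 retries: " + sharedAcls.size()); // 3
        System.out.println("fixed list: " + fixedDefaultAcl().size());          // 1
      }
    }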


[47/51] [abbrv] hive git commit: HIVE-11675 : make use of file footer PPD API in ETL strategy or separate strategy (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by jd...@apache.org.
HIVE-11675 : make use of file footer PPD API in ETL strategy or separate strategy (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/868db42a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/868db42a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/868db42a

Branch: refs/heads/llap
Commit: 868db42a695e3137c65b53386eb4d2b2ec76b265
Parents: 26b5c7b
Author: Sergey Shelukhin <se...@apache.org>
Authored: Tue Mar 15 18:37:29 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Tue Mar 15 18:37:29 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   5 +-
 .../hadoop/hive/metastore/FileFormatProxy.java  |   6 +-
 .../hive/metastore/FileMetadataHandler.java     |   2 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  19 +-
 .../hive/metastore/HiveMetaStoreClient.java     |  43 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |   4 +
 .../filemeta/OrcFileMetadataHandler.java        |  15 +-
 orc/src/java/org/apache/orc/impl/InStream.java  |   2 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |   4 +-
 .../org/apache/hadoop/hive/ql/io/HdfsUtils.java |  17 +
 .../hadoop/hive/ql/io/orc/ExternalCache.java    | 338 +++++++++++
 .../hadoop/hive/ql/io/orc/LocalCache.java       | 112 ++++
 .../io/orc/MetastoreExternalCachesByConf.java   |  82 +++
 .../hive/ql/io/orc/OrcFileFormatProxy.java      |  14 +-
 .../hadoop/hive/ql/io/orc/OrcInputFormat.java   | 593 ++++++++-----------
 .../hive/ql/io/orc/OrcNewInputFormat.java       |  16 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  12 +-
 .../hive/ql/io/orc/TestInputOutputFormat.java   |  12 +-
 .../hive/ql/io/orc/TestOrcSplitElimination.java | 405 +++++++++++--
 .../hadoop/hive/ql/io/sarg/PredicateLeaf.java   |   1 -
 .../hive/ql/io/sarg/SearchArgumentImpl.java     |   1 -
 21 files changed, 1286 insertions(+), 417 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 9fd6648..98c6372 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1213,6 +1213,9 @@ public class HiveConf extends Configuration {
 
     HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false,
         "Whether to enable using file metadata cache in metastore for ORC file footers."),
+    HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true,
+        "Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" +
+        "must also be set to true for this to work)."),
 
     HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
         "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
@@ -1222,7 +1225,7 @@ public class HiveConf extends Configuration {
         "generation. 0 means process directories individually. This can increase the number of\n" +
         "metastore calls if metastore metadata cache is used."),
     HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true,
-        "Include file ID in splits on file systems thaty support it."),
+        "Include file ID in splits on file systems that support it."),
     HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true,
         "Allow synthetic file ID in splits on file systems that don't have a native one."),
     HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
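
Per the descriptions, the new PPD switch is subordinate to the cache switch. A small
sketch of the intended interplay using the plain Hadoop Configuration API (key names
from the diff, defaults as declared above):

    import org.apache.hadoop.conf.Configuration;

    public class FooterCacheFlags {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.setBoolean("hive.orc.splits.ms.footer.cache.enabled", true);

        boolean cacheEnabled =
            conf.getBoolean("hive.orc.splits.ms.footer.cache.enabled", false);
        // Metastore-side PPD only takes effect when the footer cache itself is on.
        boolean msPpd = cacheEnabled
            && conf.getBoolean("hive.orc.splits.ms.footer.cache.ppd.enabled", true);
        System.out.println("metastore-side PPD: " + msPpd);
      }
    }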

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java
index ec0be2b..14ff187 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 
 /**
@@ -33,11 +34,10 @@ public interface FileFormatProxy {
   /**
    * Applies SARG to file metadata, and produces some result for this file.
    * @param sarg SARG
-   * @param byteBuffer File metadata from metastore cache.
+   * @param fileMetadata File metadata from metastore cache.
    * @return The result to return to client for this file, or null if file is eliminated.
-   * @throws IOException
    */
-  ByteBuffer applySargToMetadata(SearchArgument sarg, ByteBuffer byteBuffer) throws IOException;
+  SplitInfos applySargToMetadata(SearchArgument sarg, ByteBuffer fileMetadata) throws IOException;
 
   /**
    * @param fs The filesystem of the file.

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java
index bd4e188..832daec 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/FileMetadataHandler.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.hive.metastore.hbase.MetadataStore;
  * contains the actual implementation that depends on some stuff in QL (for ORC).
  */
 public abstract class FileMetadataHandler {
-  static final Log LOG = LogFactory.getLog(FileMetadataHandler.class);
+  protected static final Log LOG = LogFactory.getLog(FileMetadataHandler.class);
 
   private Configuration conf;
   private PartitionExpressionProxy expressionProxy;

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 2fa0e9a..c9fadad 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5947,17 +5947,16 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean[] eliminated = new boolean[fileIds.size()];
 
       getMS().getFileMetadataByExpr(fileIds, type, req.getExpr(), metadatas, ppdResults, eliminated);
-      for (int i = 0; i < metadatas.length; ++i) {
-        long fileId = fileIds.get(i);
-        ByteBuffer metadata = metadatas[i];
-        if (metadata == null) continue;
-        metadata = (eliminated[i] || !needMetadata) ? null
-            : handleReadOnlyBufferForThrift(metadata);
+      for (int i = 0; i < fileIds.size(); ++i) {
+        if (!eliminated[i] && ppdResults[i] == null) continue; // No metadata => no ppd.
         MetadataPpdResult mpr = new MetadataPpdResult();
-        ByteBuffer bitset = eliminated[i] ? null : handleReadOnlyBufferForThrift(ppdResults[i]);
-        mpr.setMetadata(metadata);
-        mpr.setIncludeBitset(bitset);
-        result.putToMetadata(fileId, mpr);
+        ByteBuffer ppdResult = eliminated[i] ? null : handleReadOnlyBufferForThrift(ppdResults[i]);
+        mpr.setIncludeBitset(ppdResult);
+        if (needMetadata) {
+          ByteBuffer metadata = eliminated[i] ? null : handleReadOnlyBufferForThrift(metadatas[i]);
+          mpr.setMetadata(metadata);
+        }
+        result.putToMetadata(fileIds.get(i), mpr);
       }
       if (!result.isSetMetadata()) {
         result.setMetadata(EMPTY_MAP_FM2); // Set the required field.
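
The reworked loop iterates over fileIds instead of the metadata array, skips files
with no PPD result (no metadata means no PPD), and attaches footers only when the
caller asked for them. The same assembly logic as a standalone sketch, with byte[]
standing in for ByteBuffer and a record for the thrift MetadataPpdResult:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class PpdResultAssembly {
      // Hypothetical stand-in for org.apache.hadoop.hive.metastore.api.MetadataPpdResult.
      record PpdEntry(byte[] includeBitset, byte[] metadata) {}

      static Map<Long, PpdEntry> assemble(List<Long> fileIds, byte[][] metadatas,
          byte[][] ppdResults, boolean[] eliminated, boolean needMetadata) {
        Map<Long, PpdEntry> result = new HashMap<>();
        for (int i = 0; i < fileIds.size(); ++i) {
          if (!eliminated[i] && ppdResults[i] == null) continue; // No metadata => no ppd.
          byte[] bitset = eliminated[i] ? null : ppdResults[i];
          byte[] metadata = (needMetadata && !eliminated[i]) ? metadatas[i] : null;
          result.put(fileIds.get(i), new PpdEntry(bitset, metadata));
        }
        return result;
      }
    }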

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 9048d45..cdd12ab 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
@@ -54,6 +53,8 @@ import org.apache.hadoop.hive.metastore.api.FireEventResponse;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
 import org.apache.hadoop.hive.metastore.api.GetChangeVersionRequest;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest;
+import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult;
 import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
 import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
@@ -79,6 +80,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
 import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
@@ -2247,15 +2249,48 @@ public class HiveMetaStoreClient implements IMetaStoreClient {
         if (listIndex == fileIds.size()) return null;
         int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size());
         List<Long> subList = fileIds.subList(listIndex, endIndex);
-        GetFileMetadataRequest req = new GetFileMetadataRequest();
-        req.setFileIds(subList);
-        GetFileMetadataResult resp = client.get_file_metadata(req);
+        GetFileMetadataResult resp = sendGetFileMetadataReq(subList);
+        // TODO: we could remember if it's unsupported and stop sending calls; although, it might
+        //       be a bad idea for HS2+standalone metastore that could be updated with support.
+        //       Maybe we should just remember this for some time.
+        if (!resp.isIsSupported()) return null;
         listIndex = endIndex;
         return resp.getMetadata();
       }
     };
   }
 
+  private GetFileMetadataResult sendGetFileMetadataReq(List<Long> fileIds) throws TException {
+    return client.get_file_metadata(new GetFileMetadataRequest(fileIds));
+  }
+
+  @Override
+  public Iterable<Entry<Long, MetadataPpdResult>> getFileMetadataBySarg(
+      final List<Long> fileIds, final ByteBuffer sarg, final boolean doGetFooters)
+          throws TException {
+    return new MetastoreMapIterable<Long, MetadataPpdResult>() {
+      private int listIndex = 0;
+      @Override
+      protected Map<Long, MetadataPpdResult> fetchNextBatch() throws TException {
+        if (listIndex == fileIds.size()) return null;
+        int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size());
+        List<Long> subList = fileIds.subList(listIndex, endIndex);
+        GetFileMetadataByExprResult resp = sendGetFileMetadataBySargReq(
+            sarg, subList, doGetFooters);
+        if (!resp.isIsSupported()) return null;
+        listIndex = endIndex;
+        return resp.getMetadata();
+      }
+    };
+  }
+
+  private GetFileMetadataByExprResult sendGetFileMetadataBySargReq(
+      ByteBuffer sarg, List<Long> fileIds, boolean doGetFooters) throws TException {
+    GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(fileIds, sarg);
+    req.setDoGetFooters(doGetFooters); // Let the caller decide whether footers are fetched.
+    return client.get_file_metadata_by_expr(req);
+  }
+
   public static abstract class MetastoreMapIterable<K, V>
     implements Iterable<Entry<K, V>>, Iterator<Entry<K, V>> {
     private Iterator<Entry<K, V>> currentIter;
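
Both iterables follow the same MetastoreMapIterable batching pattern: walk the ID
list in fileMetadataBatchSize-sized sublists and treat a null batch as the stop
signal (used both at exhaustion and when the backing store reports the call
unsupported). The pattern as a generic sketch:

    import java.util.List;
    import java.util.Map;

    abstract class BatchFetcher<K, V> {
      private final List<K> keys;
      private final int batchSize;
      private int listIndex = 0;

      BatchFetcher(List<K> keys, int batchSize) {
        this.keys = keys;
        this.batchSize = batchSize;
      }

      /** Fetch one sublist from the backend; may return null if unsupported. */
      protected abstract Map<K, V> fetch(List<K> subList);

      /** Returns the next batch, or null when done or the backend is unsupported. */
      Map<K, V> fetchNextBatch() {
        if (listIndex == keys.size()) return null;
        int endIndex = Math.min(listIndex + batchSize, keys.size());
        Map<K, V> batch = fetch(keys.subList(listIndex, endIndex));
        if (batch == null) return null; // backend does not support the call
        listIndex = endIndex;
        return batch;
      }
    }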

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 62677d1..39cf927 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -55,6 +55,7 @@ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 import org.apache.hadoop.hive.metastore.api.LockRequest;
 import org.apache.hadoop.hive.metastore.api.LockResponse;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
 import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
@@ -1534,6 +1535,9 @@ public interface IMetaStoreClient {
    */
   Iterable<Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds) throws TException;
 
+  Iterable<Entry<Long, MetadataPpdResult>> getFileMetadataBySarg(
+      List<Long> fileIds, ByteBuffer sarg, boolean doGetFooters) throws TException;
+
   /**
    * Cleares the file metadata cache for respective file IDs.
    */

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/metastore/src/java/org/apache/hadoop/hive/metastore/filemeta/OrcFileMetadataHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/filemeta/OrcFileMetadataHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/filemeta/OrcFileMetadataHandler.java
index 1b388aa..3bca85d 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/filemeta/OrcFileMetadataHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/filemeta/OrcFileMetadataHandler.java
@@ -23,6 +23,7 @@ import java.nio.ByteBuffer;
 import java.util.List;
 
 import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 
@@ -44,11 +45,21 @@ public class OrcFileMetadataHandler extends FileMetadataHandler {
     }
     getStore().getFileMetadata(fileIds, metadatas);
     for (int i = 0; i < metadatas.length;  ++i) {
+      eliminated[i] = false;
+      results[i] = null;
       if (metadatas[i] == null) continue;
-      ByteBuffer result = getFileFormatProxy().applySargToMetadata(sarg, metadatas[i]);
+      ByteBuffer metadata = metadatas[i].duplicate(); // Duplicate to avoid modification.
+      SplitInfos result = null;
+      try {
+        result = getFileFormatProxy().applySargToMetadata(sarg, metadata);
+      } catch (IOException ex) {
+        LOG.error("Failed to apply SARG to metadata", ex);
+        metadatas[i] = null;
+        continue;
+      }
       eliminated[i] = (result == null);
       if (!eliminated[i]) {
-        results[i] = result;
+        results[i] = ByteBuffer.wrap(result.toByteArray());
       }
     }
   }
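
The duplicate() call above is the defensive step worth noting: the buffer comes
straight from the metastore cache, and parsing the footer advances the buffer's
position, so the proxy is handed an independent position/limit over the same bytes.
A minimal demonstration:

    import java.nio.ByteBuffer;

    public class DuplicateDemo {
      public static void main(String[] args) {
        ByteBuffer cached = ByteBuffer.wrap(new byte[] {1, 2, 3, 4});
        ByteBuffer view = cached.duplicate(); // same bytes, independent position/limit
        view.getInt();                        // advances only the duplicate's position
        System.out.println(view.position());   // 4
        System.out.println(cached.position()); // 0 -- the cached buffer is untouched
      }
    }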

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/orc/src/java/org/apache/orc/impl/InStream.java
----------------------------------------------------------------------
diff --git a/orc/src/java/org/apache/orc/impl/InStream.java b/orc/src/java/org/apache/orc/impl/InStream.java
index b1c6de5..1893afe 100644
--- a/orc/src/java/org/apache/orc/impl/InStream.java
+++ b/orc/src/java/org/apache/orc/impl/InStream.java
@@ -35,7 +35,7 @@ import com.google.protobuf.CodedInputStream;
 public abstract class InStream extends InputStream {
 
   private static final Logger LOG = LoggerFactory.getLogger(InStream.class);
-  private static final int PROTOBUF_MESSAGE_MAX_LIMIT = 1024 << 20; // 1GB
+  public static final int PROTOBUF_MESSAGE_MAX_LIMIT = 1024 << 20; // 1GB
 
   protected final String name;
   protected long length;

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 2b50a2a..9446876 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -481,7 +481,7 @@ public class AcidUtils {
       try {
         childrenWithId = SHIMS.listLocatedHdfsStatus(fs, directory, hiddenFileFilter);
       } catch (Throwable t) {
-        LOG.error("Failed to get files with ID; using regular API", t);
+        LOG.error("Failed to get files with ID; using regular API: " + t.getMessage());
         useFileIds = false;
       }
     }
@@ -648,7 +648,7 @@ public class AcidUtils {
       try {
         childrenWithId = SHIMS.listLocatedHdfsStatus(fs, stat.getPath(), hiddenFileFilter);
       } catch (Throwable t) {
-        LOG.error("Failed to get files with ID; using regular API", t);
+        LOG.error("Failed to get files with ID; using regular API: " + t.getMessage());
         useFileIds = false;
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
index 1a40847..b71ca09 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HdfsUtils.java
@@ -47,6 +47,23 @@ public class HdfsUtils {
     return new SyntheticFileId(path, fs.getLen(), fs.getModificationTime());
   }
 
+  public static long createFileId(String pathStr, FileStatus fs, boolean doLog, String fsName) {
+    int nameHash = pathStr.hashCode();
+    long fileSize = fs.getLen(), modTime = fs.getModificationTime();
+    int fileSizeHash = (int)(fileSize ^ (fileSize >>> 32)),
+        modTimeHash = (int)(modTime ^ (modTime >>> 32)),
+        combinedHash = modTimeHash ^ fileSizeHash;
+    long id = (((long)nameHash & 0xffffffffL) << 32) | ((long)combinedHash & 0xffffffffL);
+    if (doLog) {
+      LOG.warn("Cannot get unique file ID from " + fsName + "; using " + id
+          + " (" + pathStr + "," + nameHash + "," + fileSize + ")");
+    }
+    return id;
+  }
+
+
+
+
   // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this
   //       is still going to work. Otherwise, file IDs can be turned off. Later, we should use
   //       as public utility method in HDFS to obtain the inode-based path.
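
The helper packs a synthetic 64-bit ID out of values that change whenever the file
changes: the path-name hash fills the upper 32 bits and the XOR of the folded length
and modification-time hashes fills the lower 32. The layout as a runnable sketch
(the path and values are made up):

    public class SyntheticIdDemo {
      static long syntheticFileId(String pathStr, long fileSize, long modTime) {
        int nameHash = pathStr.hashCode();
        int fileSizeHash = (int) (fileSize ^ (fileSize >>> 32));
        int modTimeHash = (int) (modTime ^ (modTime >>> 32));
        int combinedHash = modTimeHash ^ fileSizeHash;
        // Upper 32 bits: name hash; lower 32 bits: combined size/mtime hash.
        return (((long) nameHash & 0xffffffffL) << 32) | ((long) combinedHash & 0xffffffffL);
      }

      public static void main(String[] args) {
        System.out.printf("0x%016x%n",
            syntheticFileId("/warehouse/t1/000000_0", 4096L, 1458000000000L));
      }
    }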

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
new file mode 100644
index 0000000..6556fbf
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java
@@ -0,0 +1,338 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+import org.apache.hadoop.hive.ql.io.HdfsUtils;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.FileInfo;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.FooterCache;
+import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
+import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
+import org.apache.orc.FileMetaInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.io.Output;
+import com.google.common.collect.Lists;
+
+/** Metastore-based footer cache storing serialized footers. Also has a local cache. */
+public class ExternalCache implements FooterCache {
+  private static final Logger LOG = LoggerFactory.getLogger(ExternalCache.class);
+  private static boolean isDebugEnabled = LOG.isDebugEnabled();
+
+  private final LocalCache localCache;
+  private final ExternalFooterCachesByConf externalCacheSrc;
+  private boolean isWarnLogged = false;
+
+  // Configuration and things set from it.
+  private HiveConf conf;
+  private boolean isInTest;
+  private SearchArgument sarg;
+  private ByteBuffer sargIsOriginal, sargNotIsOriginal;
+  private boolean isPpdEnabled;
+
+  public ExternalCache(LocalCache lc, ExternalFooterCachesByConf efcf) {
+    localCache = lc;
+    externalCacheSrc = efcf;
+  }
+
+  @Override
+  public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
+      throws IOException {
+    localCache.put(fileId, file, fileMetaInfo, orcReader);
+    if (fileId != null) {
+      try {
+        externalCacheSrc.getCache(conf).putFileMetadata(Lists.newArrayList(fileId),
+            Lists.newArrayList(((ReaderImpl)orcReader).getSerializedFileFooter()));
+      } catch (HiveException e) {
+        throw new IOException(e);
+      }
+    }
+  }
+
+  @Override
+  public boolean isBlocking() {
+    return true;
+  }
+
+  @Override
+  public boolean hasPpd() {
+    return isPpdEnabled;
+  }
+
+  public void configure(HiveConf queryConfig) {
+    this.conf = queryConfig;
+    this.sarg = ConvertAstToSearchArg.createFromConf(conf);
+    this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVEOPTINDEXFILTER)
+        && HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD);
+    this.isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
+    this.sargIsOriginal = this.sargNotIsOriginal = null;
+  }
+
+  @Override
+  public void getAndValidate(List<HdfsFileStatusWithId> files, boolean isOriginal,
+      FileInfo[] result, ByteBuffer[] ppdResult) throws IOException, HiveException {
+    assert result.length == files.size();
+    assert ppdResult == null || ppdResult.length == files.size();
+    // First, check the local cache.
+    localCache.getAndValidate(files, isOriginal, result, ppdResult);
+
+    // posMap is an unfortunate consequence of batching/iterating thru MS results.
+    HashMap<Long, Integer> posMap = new HashMap<Long, Integer>();
+    // We won't do metastore-side PPD for the things we have locally.
+    List<Long> fileIds = determineFileIdsToQuery(files, result, posMap);
+     // Need to get a new one, see the comment wrt threadlocals.
+    ExternalFooterCachesByConf.Cache cache = externalCacheSrc.getCache(conf);
+    ByteBuffer serializedSarg = null;
+    if (isPpdEnabled) {
+      serializedSarg = getSerializedSargForMetastore(isOriginal);
+    }
+    if (serializedSarg != null) {
+      Iterator<Entry<Long, MetadataPpdResult>> iter = cache.getFileMetadataByExpr(
+          fileIds, serializedSarg, false); // don't fetch the footer, PPD happens in MS.
+      while (iter.hasNext()) {
+        Entry<Long, MetadataPpdResult> e = iter.next();
+        int ix = getAndVerifyIndex(posMap, files, result, e.getKey());
+        processPpdResult(e.getValue(), files.get(ix), ix, result, ppdResult);
+      }
+    } else {
+      // Only populate corrupt IDs for the things we couldn't deserialize if we are not using
+      // ppd. We assume that PPD makes sure the cached values are correct (or fails otherwise);
+      // also, we don't use the footers in PPD case.
+      List<Long> corruptIds = null;
+      Iterator<Entry<Long, ByteBuffer>> iter = cache.getFileMetadata(fileIds);
+      while (iter.hasNext()) {
+        Entry<Long, ByteBuffer> e = iter.next();
+        int ix = getAndVerifyIndex(posMap, files, result, e.getKey());
+        if (!processBbResult(e.getValue(), ix, files.get(ix), result))  {
+          if (corruptIds == null) {
+            corruptIds = new ArrayList<>();
+          }
+          corruptIds.add(e.getKey());
+        }
+      }
+      if (corruptIds != null) {
+        cache.clearFileMetadata(corruptIds);
+      }
+    }
+  }
+
+  private int getAndVerifyIndex(HashMap<Long, Integer> posMap,
+      List<HdfsFileStatusWithId> files, FileInfo[] result, Long fileId) {
+    int ix = posMap.get(fileId);
+    assert result[ix] == null;
+    assert fileId != null && fileId.equals(files.get(ix).getFileId());
+    return ix;
+  }
+
+  private boolean processBbResult(
+      ByteBuffer bb, int ix, HdfsFileStatusWithId file, FileInfo[] result) throws IOException {
+    if (bb == null) return true;
+    result[ix] = createFileInfoFromMs(file, bb);
+    if (result[ix] == null) {
+      return false;
+    }
+
+    localCache.put(file.getFileStatus().getPath(), result[ix]);
+    return true;
+  }
+
+  private void processPpdResult(MetadataPpdResult mpr, HdfsFileStatusWithId file,
+      int ix, FileInfo[] result, ByteBuffer[] ppdResult) throws IOException {
+    if (mpr == null) return; // This file is unknown to metastore.
+
+    ppdResult[ix] = mpr.isSetIncludeBitset() ? mpr.bufferForIncludeBitset() : NO_SPLIT_AFTER_PPD;
+    if (mpr.isSetMetadata()) {
+      result[ix] = createFileInfoFromMs(file, mpr.bufferForMetadata());
+      if (result[ix] != null) {
+        localCache.put(file.getFileStatus().getPath(), result[ix]);
+      }
+    }
+  }
+
+  private List<Long> determineFileIdsToQuery(
+      List<HdfsFileStatusWithId> files, FileInfo[] result, HashMap<Long, Integer> posMap) {
+    for (int i = 0; i < result.length; ++i) {
+      if (result[i] != null) continue;
+      HdfsFileStatusWithId file = files.get(i);
+      final FileStatus fs = file.getFileStatus();
+      Long fileId = file.getFileId();
+      if (fileId == null) {
+        if (!isInTest) {
+          if (!isWarnLogged || isDebugEnabled) {
+            LOG.warn("Not using metastore cache because fileId is missing: " + fs.getPath());
+            isWarnLogged = true;
+          }
+          continue;
+        }
+        fileId = generateTestFileId(fs, files, i);
+        LOG.info("Generated file ID " + fileId + " at " + i);
+      }
+      posMap.put(fileId, i);
+    }
+    return Lists.newArrayList(posMap.keySet());
+  }
+
+  private Long generateTestFileId(final FileStatus fs, List<HdfsFileStatusWithId> files, int i) {
+    final Long fileId = HdfsUtils.createFileId(fs.getPath().toUri().getPath(), fs, false, null);
+    files.set(i, new HdfsFileStatusWithId() {
+      @Override
+      public FileStatus getFileStatus() {
+        return fs;
+      }
+
+      @Override
+      public Long getFileId() {
+        return fileId;
+      }
+    });
+    return fileId;
+  }
+
+  private ByteBuffer getSerializedSargForMetastore(boolean isOriginal) {
+    if (sarg == null) return null;
+    ByteBuffer serializedSarg = isOriginal ? sargIsOriginal : sargNotIsOriginal;
+    if (serializedSarg != null) return serializedSarg;
+    SearchArgument sarg2 = sarg;
+    Kryo kryo = SerializationUtilities.borrowKryo();
+    try {
+      if ((isOriginal ? sargNotIsOriginal : sargIsOriginal) == null) {
+        sarg2 = kryo.copy(sarg2); // In case we need it for the other case.
+      }
+      translateSargToTableColIndexes(sarg2, conf, OrcInputFormat.getRootColumn(isOriginal));
+      ExternalCache.Baos baos = new Baos();
+      Output output = new Output(baos);
+      kryo.writeObject(output, sarg2);
+      output.flush();
+      serializedSarg = baos.get();
+      if (isOriginal) {
+        sargIsOriginal = serializedSarg;
+      } else {
+        sargNotIsOriginal = serializedSarg;
+      }
+    } finally {
+      SerializationUtilities.releaseKryo(kryo);
+    }
+    return serializedSarg;
+  }
+
+  /**
+   * Modifies the SARG, replacing column names with column indexes in target table schema. This
+   * basically does the same thing as all the shenanigans with included columns, except for the
+   * last step where ORC gets direct subtypes of root column and uses the ordered match to map
+   * table columns to file columns. The numbers put into the predicate leaf should allow going into
+   * said subtypes directly by index to get the proper index in the file.
+   * This won't work with schema evolution, although it's probably much easier to reason about
+   * if schema evolution was to be supported, because this is a clear boundary between table
+   * schema columns and all things ORC. None of the ORC stuff is used here and none of the
+   * table schema stuff is used after that - ORC doesn't need a bunch of extra crap to apply
+   * the SARG thus modified.
+   */
+  public static void translateSargToTableColIndexes(
+      SearchArgument sarg, Configuration conf, int rootColumn) {
+    String nameStr = OrcInputFormat.getNeededColumnNamesString(conf),
+        idStr = OrcInputFormat.getSargColumnIDsString(conf);
+    String[] knownNames = nameStr.split(",");
+    String[] idStrs = (idStr == null) ? null : idStr.split(",");
+    assert idStrs == null || knownNames.length == idStrs.length;
+    HashMap<String, Integer> nameIdMap = new HashMap<>();
+    for (int i = 0; i < knownNames.length; ++i) {
+      Integer newId = (idStrs != null) ? Integer.parseInt(idStrs[i]) : i;
+      Integer oldId = nameIdMap.put(knownNames[i], newId);
+      if (oldId != null && oldId.intValue() != newId.intValue()) {
+        throw new RuntimeException("Multiple IDs for " + knownNames[i] + " in column strings: ["
+            + idStr + "], [" + nameStr + "]");
+      }
+    }
+    List<PredicateLeaf> leaves = sarg.getLeaves();
+    for (int i = 0; i < leaves.size(); ++i) {
+      PredicateLeaf pl = leaves.get(i);
+      Integer colId = nameIdMap.get(pl.getColumnName());
+      String newColName = RecordReaderImpl.encodeTranslatedSargColumn(rootColumn, colId);
+      SearchArgumentFactory.setPredicateLeafColumn(pl, newColName);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("SARG translated into " + sarg);
+    }
+  }
+
+  private static FileInfo createFileInfoFromMs(
+      HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
+    if (bb == null) return null;
+    FileStatus fs = file.getFileStatus();
+    ReaderImpl.FooterInfo fi = null;
+    ByteBuffer copy = bb.duplicate();
+    try {
+      fi = ReaderImpl.extractMetaInfoFromFooter(copy, fs.getPath());
+    } catch (Exception ex) {
+      byte[] data = new byte[bb.remaining()];
+      System.arraycopy(bb.array(), bb.arrayOffset() + bb.position(), data, 0, data.length);
+      String msg = "Failed to parse the footer stored in cache for file ID "
+          + file.getFileId() + " " + bb + " [ " + Hex.encodeHexString(data) + " ]";
+      LOG.error(msg, ex);
+      return null;
+    }
+    return new FileInfo(fs.getModificationTime(), fs.getLen(), fi.getStripes(), fi.getMetadata(),
+        fi.getFooter().getTypesList(), fi.getFooter().getStatisticsList(), fi.getFileMetaInfo(),
+        fi.getFileMetaInfo().writerVersion, file.getFileId());
+  }
+
+  private static final class Baos extends ByteArrayOutputStream {
+    public ByteBuffer get() {
+      return ByteBuffer.wrap(buf, 0, count);
+    }
+  }
+
+
+  /** An abstraction for testing ExternalCache in OrcInputFormat. */
+  public interface ExternalFooterCachesByConf {
+    public interface Cache {
+      Iterator<Map.Entry<Long, MetadataPpdResult>> getFileMetadataByExpr(List<Long> fileIds,
+          ByteBuffer serializedSarg, boolean doGetFooters) throws HiveException;
+      void clearFileMetadata(List<Long> fileIds) throws HiveException;
+      Iterator<Map.Entry<Long, ByteBuffer>>  getFileMetadata(List<Long> fileIds)
+          throws HiveException;
+      void putFileMetadata(
+          ArrayList<Long> keys, ArrayList<ByteBuffer> values) throws HiveException;
+    }
+
+    public Cache getCache(HiveConf conf) throws IOException;
+  }
+}
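
Since ExternalFooterCachesByConf exists mainly so tests can swap the metastore out,
a map-backed stand-in is enough to drive ExternalCache's batching and corrupt-footer
paths. A hypothetical sketch (it mirrors the Cache methods rather than implementing
the interface, so it compiles without HiveException):

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    final class InMemoryFooterCache {
      private final Map<Long, ByteBuffer> store = new HashMap<>();

      void putFileMetadata(List<Long> keys, List<ByteBuffer> values) {
        for (int i = 0; i < keys.size(); ++i) {
          store.put(keys.get(i), values.get(i).duplicate());
        }
      }

      Iterator<Map.Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds) {
        Map<Long, ByteBuffer> found = new LinkedHashMap<>();
        for (Long id : fileIds) {
          ByteBuffer bb = store.get(id);
          if (bb != null) found.put(id, bb.duplicate()); // duplicate: callers may consume it
        }
        return found.entrySet().iterator();
      }

      void clearFileMetadata(List<Long> fileIds) {
        fileIds.forEach(store::remove);
      }
    }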

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
new file mode 100644
index 0000000..8151e52
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/LocalCache.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.FileInfo;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.FooterCache;
+import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId;
+import org.apache.orc.FileMetaInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
+/** Local footer cache using Guava. Stores convoluted Java objects. */
+class LocalCache implements FooterCache {
+  private static final Logger LOG = LoggerFactory.getLogger(LocalCache.class);
+  private static boolean isDebugEnabled = LOG.isDebugEnabled();
+
+  private final Cache<Path, FileInfo> cache;
+
+  public LocalCache(int numThreads, int cacheStripeDetailsSize) {
+    cache = CacheBuilder.newBuilder()
+      .concurrencyLevel(numThreads)
+      .initialCapacity(cacheStripeDetailsSize)
+      .maximumSize(cacheStripeDetailsSize)
+      .softValues()
+      .build();
+  }
+
+  public void clear() {
+    cache.invalidateAll();
+    cache.cleanUp();
+  }
+
+  public void getAndValidate(List<HdfsFileStatusWithId> files, boolean isOriginal,
+      FileInfo[] result, ByteBuffer[] ppdResult) throws IOException {
+    // TODO: should local cache also be by fileId? Preserve the original logic for now.
+    assert result.length == files.size();
+    int i = -1;
+    for (HdfsFileStatusWithId fileWithId : files) {
+      ++i;
+      FileStatus file = fileWithId.getFileStatus();
+      Path path = file.getPath();
+      Long fileId = fileWithId.getFileId();
+      FileInfo fileInfo = cache.getIfPresent(path);
+      if (isDebugEnabled) {
+        LOG.debug("Info " + (fileInfo == null ? "not " : "") + "cached for path: " + path);
+      }
+      if (fileInfo == null) continue;
+      if ((fileId != null && fileInfo.fileId != null && fileId == fileInfo.fileId)
+          || (fileInfo.modificationTime == file.getModificationTime() &&
+          fileInfo.size == file.getLen())) {
+        result[i] = fileInfo;
+        continue;
+      }
+      // Invalidate
+      cache.invalidate(path);
+      if (isDebugEnabled) {
+        LOG.debug("Meta-Info for : " + path + " changed. CachedModificationTime: "
+            + fileInfo.modificationTime + ", CurrentModificationTime: "
+            + file.getModificationTime() + ", CachedLength: " + fileInfo.size
+            + ", CurrentLength: " + file.getLen());
+      }
+    }
+  }
+
+  public void put(Path path, FileInfo fileInfo) {
+    cache.put(path, fileInfo);
+  }
+
+  @Override
+  public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
+      throws IOException {
+    cache.put(file.getPath(), new FileInfo(file.getModificationTime(), file.getLen(),
+        orcReader.getStripes(), orcReader.getStripeStatistics(), orcReader.getTypes(),
+        orcReader.getOrcProtoFileStatistics(), fileMetaInfo, orcReader.getWriterVersion(),
+        fileId));
+  }
+
+  @Override
+  public boolean isBlocking() {
+    return false;
+  }
+
+  @Override
+  public boolean hasPpd() {
+    return false;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetastoreExternalCachesByConf.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetastoreExternalCachesByConf.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetastoreExternalCachesByConf.java
new file mode 100644
index 0000000..ad8f4ef
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/MetastoreExternalCachesByConf.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.ql.io.orc.ExternalCache.ExternalFooterCachesByConf;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+
+/**
+ * An implementation of external cache and factory based on metastore.
+ */
+public class MetastoreExternalCachesByConf implements ExternalFooterCachesByConf {
+  public static class HBaseCache implements ExternalFooterCachesByConf.Cache {
+    private Hive hive;
+
+    public HBaseCache(Hive hive) {
+      this.hive = hive;
+    }
+
+    @Override
+    public Iterator<Entry<Long, MetadataPpdResult>> getFileMetadataByExpr(
+        List<Long> fileIds, ByteBuffer sarg, boolean doGetFooters) throws HiveException {
+      return hive.getFileMetadataByExpr(fileIds, sarg, doGetFooters).iterator();
+    }
+
+    @Override
+    public void clearFileMetadata(List<Long> fileIds) throws HiveException {
+      hive.clearFileMetadata(fileIds);
+    }
+
+    @Override
+    public Iterator<Entry<Long, ByteBuffer>> getFileMetadata(
+        List<Long> fileIds) throws HiveException {
+      return hive.getFileMetadata(fileIds).iterator();
+    }
+
+    @Override
+    public void putFileMetadata(
+        ArrayList<Long> fileIds, ArrayList<ByteBuffer> metadata) throws HiveException {
+      hive.putFileMetadata(fileIds, metadata);
+    }
+  }
+
+  @Override
+  public ExternalFooterCachesByConf.Cache getCache(HiveConf conf) throws IOException {
+    // TODO: we wish we could cache the Hive object, but it's not thread safe, and each
+    //       threadlocal we "cache" would need to be reinitialized for every query. This is
+    //       a huge PITA. Hive object will be cached internally, but the compat check will be
+    //       done every time inside get().
+    try {
+      return new HBaseCache(Hive.getWithFastCheck(conf));
+    } catch (HiveException e) {
+      throw new IOException(e);
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileFormatProxy.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileFormatProxy.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileFormatProxy.java
index ef76723..c9c7b5a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileFormatProxy.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFileFormatProxy.java
@@ -29,16 +29,19 @@ import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.orc.OrcProto;
 import org.apache.orc.StripeInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** File format proxy for ORC. */
 public class OrcFileFormatProxy implements FileFormatProxy {
+  private static final Logger LOG = LoggerFactory.getLogger(OrcFileFormatProxy.class);
 
   @Override
-  public ByteBuffer applySargToMetadata(
-      SearchArgument sarg, ByteBuffer byteBuffer) throws IOException {
+  public SplitInfos applySargToMetadata(
+      SearchArgument sarg, ByteBuffer fileMetadata) throws IOException {
     // TODO: ideally we should store shortened representation of only the necessary fields
     //       in HBase; it will probably require custom SARG application code.
-    ReaderImpl.FooterInfo fi = ReaderImpl.extractMetaInfoFromFooter(byteBuffer, null);
+    ReaderImpl.FooterInfo fi = ReaderImpl.extractMetaInfoFromFooter(fileMetadata, null);
     OrcProto.Footer footer = fi.getFooter();
     int stripeCount = footer.getStripesCount();
     boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg(
@@ -52,10 +55,13 @@ public class OrcFileFormatProxy implements FileFormatProxy {
       if (result != null && !result[i]) continue;
       isEliminated = false;
       StripeInformation si = stripes.get(i);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("PPD is adding a split " + i + ": " + si.getOffset() + ", " + si.getLength());
+      }
       sb.addInfos(SplitInfo.newBuilder().setIndex(i)
           .setOffset(si.getOffset()).setLength(si.getLength()));
     }
-    return isEliminated ? null : ByteBuffer.wrap(sb.build().toByteArray());
+    return isEliminated ? null : sb.build();
   }
 
   public ByteBuffer[] getAddedColumnsToCache() {

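The signature change above means applySargToMetadata now hands back structured SplitInfos instead of an opaque ByteBuffer, leaving serialization to the cache layer. A small round-trip sketch of how that protobuf travels between a producer and the consumer in OrcInputFormat.callInternal() further down; the standalone class and the stripe offsets are made up for illustration, but the protobuf calls are the same ones the patch uses.

import java.nio.ByteBuffer;

import org.apache.hadoop.hive.metastore.Metastore.SplitInfo;
import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;

import com.google.protobuf.CodedInputStream;

public class PpdResultRoundTrip {
  public static void main(String[] args) throws Exception {
    // Producer side: what a cache would store after applySargToMetadata.
    SplitInfos infos = SplitInfos.newBuilder()
        .addInfos(SplitInfo.newBuilder().setIndex(0).setOffset(3L).setLength(1024L))
        .addInfos(SplitInfo.newBuilder().setIndex(2).setOffset(4096L).setLength(2048L))
        .build();
    ByteBuffer stored = ByteBuffer.wrap(infos.toByteArray());

    // Consumer side: what callInternal() does with a cached ppdResult buffer.
    CodedInputStream cis = CodedInputStream.newInstance(
        stored.array(), stored.arrayOffset(), stored.remaining());
    SplitInfos parsed = SplitInfos.parseFrom(cis);
    for (SplitInfo si : parsed.getInfosList()) {
      System.out.println(si.getIndex() + ": " + si.getOffset() + "+" + si.getLength());
    }
  }
}
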
http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
index cd2a668..8b611bb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
@@ -18,16 +18,17 @@
 
 package org.apache.hadoop.hive.ql.io.orc;
 
+import org.apache.orc.impl.InStream;
+
+
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
@@ -40,7 +41,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.commons.codec.binary.Hex;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.serde.serdeConstants;
@@ -70,6 +70,8 @@ import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.Metastore;
+import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
@@ -84,15 +86,14 @@ import org.apache.hadoop.hive.ql.io.LlapWrappableInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.RecordIdentifier;
 import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
 import org.apache.hadoop.hive.ql.io.StatsProvidingRecordReader;
+import org.apache.hadoop.hive.ql.io.orc.ExternalCache.ExternalFooterCachesByConf;
 import org.apache.hadoop.hive.ql.io.SyntheticFileId;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.TruthValue;
-import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
-import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory;
 import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
 import org.apache.hadoop.hive.serde2.SerDeStats;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -111,10 +112,9 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.orc.OrcProto;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedInputStream;
 /**
  * A MapReduce/Hive input format for ORC files.
  * <p>
@@ -274,7 +274,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
    * @param isOriginal is the file in the original format?
    * @return the column number for the root of row.
    */
-  private static int getRootColumn(boolean isOriginal) {
+  static int getRootColumn(boolean isOriginal) {
     return isOriginal ? 0 : (OrcRecordUpdater.ROW + 1);
   }
 
@@ -335,45 +335,6 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
   }
 
-  /**
-   * Modifies the SARG, replacing column names with column indexes in target table schema. This
-   * basically does the same thing as all the shennannigans with included columns, except for the
-   * last step where ORC gets direct subtypes of root column and uses the ordered match to map
-   * table columns to file columns. The numbers put into predicate leaf should allow to go into
-   * said subtypes directly by index to get the proper index in the file.
-   * This won't work with schema evolution, although it's probably much easier to reason about
-   * if schema evolution was to be supported, because this is a clear boundary between table
-   * schema columns and all things ORC. None of the ORC stuff is used here and none of the
-   * table schema stuff is used after that - ORC doesn't need a bunch of extra crap to apply
-   * the SARG thus modified.
-   */
-  public static void translateSargToTableColIndexes(
-      SearchArgument sarg, Configuration conf, int rootColumn) {
-    String nameStr = getNeededColumnNamesString(conf), idStr = getSargColumnIDsString(conf);
-    String[] knownNames = nameStr.split(",");
-    String[] idStrs = (idStr == null) ? null : idStr.split(",");
-    assert idStrs == null || knownNames.length == idStrs.length;
-    HashMap<String, Integer> nameIdMap = new HashMap<>();
-    for (int i = 0; i < knownNames.length; ++i) {
-      Integer newId = (idStrs != null) ? Integer.parseInt(idStrs[i]) : i;
-      Integer oldId = nameIdMap.put(knownNames[i], newId);
-      if (oldId != null && oldId.intValue() != newId.intValue()) {
-        throw new RuntimeException("Multiple IDs for " + knownNames[i] + " in column strings: ["
-            + idStr + "], [" + nameStr + "]");
-      }
-    }
-    List<PredicateLeaf> leaves = sarg.getLeaves();
-    for (int i = 0; i < leaves.size(); ++i) {
-      PredicateLeaf pl = leaves.get(i);
-      Integer colId = nameIdMap.get(pl.getColumnName());
-      String newColName = RecordReaderImpl.encodeTranslatedSargColumn(rootColumn, colId);
-      SearchArgumentFactory.setPredicateLeafColumn(pl, newColName);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SARG translated into " + sarg);
-    }
-  }
-
   public static boolean[] genIncludedColumns(
       List<OrcProto.Type> types, List<Integer> included, boolean isOriginal) {
     int rootColumn = getRootColumn(isOriginal);
@@ -477,14 +438,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     return getSargColumnNames(columnNamesString.split(","), types, include, isOriginal);
   }
 
-  private static String getNeededColumnNamesString(Configuration conf) {
+  static String getNeededColumnNamesString(Configuration conf) {
     return conf.get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR);
   }
 
-  private static String getSargColumnIDsString(Configuration conf) {
+  static String getSargColumnIDsString(Configuration conf) {
     return conf.getBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, true) ? null
         : conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
   }
+
   @Override
   public boolean validateInput(FileSystem fs, HiveConf conf,
                                List<FileStatus> files
@@ -542,7 +504,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     // This is not thread safe between different split generations (and wasn't anyway).
     private FooterCache footerCache;
     private static LocalCache localCache;
-    private static MetastoreCache metaCache;
+    private static ExternalCache metaCache;
     static ExecutorService threadPool = null;
     private final int numBuckets;
     private final int splitStrategyBatchMs;
@@ -559,10 +521,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     private final SearchArgument sarg;
 
     Context(Configuration conf) {
-      this(conf, 1);
+      this(conf, 1, null);
     }
 
     Context(Configuration conf, final int minSplits) {
+      this(conf, minSplits, null);
+    }
+
+    @VisibleForTesting
+    Context(Configuration conf, final int minSplits, ExternalFooterCachesByConf efc) {
       this.conf = conf;
       this.forceThreadpool = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
       this.sarg = ConvertAstToSearchArg.createFromConf(conf);
@@ -603,20 +570,22 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           // HDFS, because only HDFS would return fileIds for us. If fileId is extended using
           // size/mod time/etc. for other FSes, we might need to check FSes explicitly because
           // using such an aggregate fileId cache is not bulletproof and should be disable-able.
-          boolean useMetastoreCache = HiveConf.getBoolVar(
+          boolean useExternalCache = HiveConf.getBoolVar(
               conf, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED);
           if (localCache == null) {
             localCache = new LocalCache(numThreads, cacheStripeDetailsSize);
           }
-          if (useMetastoreCache) {
+          if (useExternalCache) {
             if (metaCache == null) {
-              metaCache = new MetastoreCache(localCache);
+              metaCache = new ExternalCache(localCache,
+                  efc == null ? new MetastoreExternalCachesByConf() : efc);
             }
             assert conf instanceof HiveConf;
             metaCache.configure((HiveConf)conf);
           }
           // Set footer cache for current split generation. See field comment - not thread safe.
-          footerCache = useMetastoreCache ? metaCache : localCache;
+          // TODO: we should be able to enable caches separately
+          footerCache = useExternalCache ? metaCache : localCache;
         }
       }
       String value = conf.get(ValidTxnList.VALID_TXNS_KEY,
@@ -638,6 +607,12 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         threadPool = null;
       }
     }
+
+    @VisibleForTesting
+    public static void clearLocalCache() {
+      if (localCache == null) return;
+      localCache.clear();
+    }
   }
 
   /**
@@ -676,12 +651,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     private final boolean isOriginal;
     private final List<DeltaMetaData> deltas;
     private final boolean hasBase;
+    private final ByteBuffer ppdResult;
 
-    SplitInfo(Context context, FileSystem fs,
-        HdfsFileStatusWithId fileWithId, FileInfo fileInfo,
-        boolean isOriginal,
-        List<DeltaMetaData> deltas,
-        boolean hasBase, Path dir, boolean[] covered) throws IOException {
+    SplitInfo(Context context, FileSystem fs, HdfsFileStatusWithId fileWithId, FileInfo fileInfo,
+        boolean isOriginal, List<DeltaMetaData> deltas, boolean hasBase, Path dir,
+        boolean[] covered, ByteBuffer ppdResult) throws IOException {
       super(dir, context.numBuckets, deltas, covered);
       this.context = context;
       this.fs = fs;
@@ -690,6 +664,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       this.isOriginal = isOriginal;
       this.deltas = deltas;
       this.hasBase = hasBase;
+      this.ppdResult = ppdResult;
     }
 
     @VisibleForTesting
@@ -697,7 +672,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         boolean isOriginal, ArrayList<DeltaMetaData> deltas, boolean hasBase, Path dir,
         boolean[] covered) throws IOException {
       this(context, fs, AcidUtils.createOriginalObj(null, fileStatus),
-          fileInfo, isOriginal, deltas, hasBase, dir, covered);
+          fileInfo, isOriginal, deltas, hasBase, dir, covered, null);
     }
   }
 
@@ -719,14 +694,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       private final FileSystem fs;
     }
 
-
     Context context;
-    List<ETLDir> dirs;
+    final List<ETLDir> dirs;
     List<HdfsFileStatusWithId> files;
-    boolean isOriginal;
-    List<DeltaMetaData> deltas;
-    boolean[] covered;
-    private List<Future<List<OrcSplit>>> splitFuturesRef;
+    private final List<DeltaMetaData> deltas;
+    private final boolean[] covered;
+    final boolean isOriginal;
+    // References to external fields for async SplitInfo generation.
+    private List<Future<List<OrcSplit>>> splitFuturesRef = null;
+    private List<OrcSplit> splitsRef = null;
     private final UserGroupInformation ugi;
     private final boolean allowSyntheticFileIds;
 
@@ -748,10 +724,19 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     public List<SplitInfo> getSplits() throws IOException {
       List<SplitInfo> result = new ArrayList<>(files.size());
       // Force local cache if we have deltas.
-      FooterCache cache = context.cacheStripeDetails ?
-          (deltas == null ? context.footerCache : Context.localCache) : null;
+      FooterCache cache = context.cacheStripeDetails ? ((deltas == null || deltas.isEmpty())
+          ? context.footerCache : Context.localCache) : null;
       if (cache != null) {
-        FileInfo[] infos = cache.getAndValidate(files);
+        FileInfo[] infos = new FileInfo[files.size()];
+        ByteBuffer[] ppdResults = null;
+        if (cache.hasPpd()) {
+          ppdResults = new ByteBuffer[files.size()];
+        }
+        try {
+          cache.getAndValidate(files, isOriginal, infos, ppdResults);
+        } catch (HiveException e) {
+          throw new IOException(e);
+        }
         int dirIx = -1, fileInDirIx = -1, filesInDirCount = 0;
         ETLDir dir = null;
         for (int i = 0; i < files.size(); ++i) {
@@ -760,15 +745,16 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
             filesInDirCount = dir.fileCount;
           }
           FileInfo info = infos[i];
+          ByteBuffer ppdResult = ppdResults == null ? null : ppdResults[i];
+          HdfsFileStatusWithId file = files.get(i);
           if (info != null) {
             // Cached copy is valid
             context.cacheHitCounter.incrementAndGet();
           }
-          HdfsFileStatusWithId file = files.get(i);
-          // ignore files of 0 length
-          if (file.getFileStatus().getLen() > 0) {
-            result.add(new SplitInfo(
-                context, dir.fs, file, info, isOriginal, deltas, true, dir.dir, covered));
+          // Ignore files eliminated by PPD, or of 0 length.
+          if (ppdResult != FooterCache.NO_SPLIT_AFTER_PPD && file.getFileStatus().getLen() > 0) {
+            result.add(new SplitInfo(context, dir.fs, file, info,
+                isOriginal, deltas, true, dir.dir, covered, ppdResult));
           }
         }
       } else {
@@ -781,8 +767,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           }
           // ignore files of 0 length
           if (file.getFileStatus().getLen() > 0) {
-            result.add(new SplitInfo(
-                context, dir.fs, file, null, isOriginal, deltas, true, dir.dir, covered));
+            result.add(new SplitInfo(context, dir.fs, file, null,
+                isOriginal, deltas, true, dir.dir, covered, null));
           }
         }
       }
@@ -826,14 +812,15 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       return CombineResult.YES;
     }
 
-    public Future<Void> generateSplitWork(
-        Context context, List<Future<List<OrcSplit>>> splitFutures) throws IOException {
+    public Future<Void> generateSplitWork(Context context,
+        List<Future<List<OrcSplit>>> splitFutures, List<OrcSplit> splits) throws IOException {
       if ((context.cacheStripeDetails && context.footerCache.isBlocking())
           || context.forceThreadpool) {
         this.splitFuturesRef = splitFutures;
+        this.splitsRef = splits;
         return Context.threadPool.submit(this);
       } else {
-        runGetSplitsSync(splitFutures, null);
+        runGetSplitsSync(splitFutures, splits, null);
         return null;
       }
     }
@@ -841,14 +828,14 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     @Override
     public Void call() throws IOException {
       if (ugi == null) {
-        runGetSplitsSync(splitFuturesRef, null);
+        runGetSplitsSync(splitFuturesRef, splitsRef, null);
         return null;
       }
       try {
         return ugi.doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws Exception {
-            runGetSplitsSync(splitFuturesRef, ugi);
+            runGetSplitsSync(splitFuturesRef, splitsRef, ugi);
             return null;
           }
         });
@@ -857,20 +844,43 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       }
     }
 
+
+
+
+
     private void runGetSplitsSync(List<Future<List<OrcSplit>>> splitFutures,
-        UserGroupInformation ugi) throws IOException {
-      List<SplitInfo> splits = getSplits();
-      List<Future<List<OrcSplit>>> localList = new ArrayList<>(splits.size());
+        List<OrcSplit> splits, UserGroupInformation ugi) throws IOException {
       UserGroupInformation tpUgi = ugi == null ? UserGroupInformation.getCurrentUser() : ugi;
-      for (SplitInfo splitInfo : splits) {
-        localList.add(Context.threadPool.submit(
-            new SplitGenerator(splitInfo, tpUgi, allowSyntheticFileIds)));
+      List<SplitInfo> splitInfos = getSplits();
+      List<Future<List<OrcSplit>>> localListF = null;
+      List<OrcSplit> localListS = null;
+      for (SplitInfo splitInfo : splitInfos) {
+        SplitGenerator sg = new SplitGenerator(splitInfo, tpUgi, allowSyntheticFileIds);
+        if (!sg.isBlocking()) {
+          if (localListS == null) {
+            localListS = new ArrayList<>(splits.size());
+          }
+          // Already called in doAs, so no need to doAs here.
+          localListS.addAll(sg.call());
+        } else {
+          if (localListF == null) {
+            localListF = new ArrayList<>(splits.size());
+          }
+          localListF.add(Context.threadPool.submit(sg));
+        }
       }
-      synchronized (splitFutures) {
-        splitFutures.addAll(localList);
+      if (localListS != null) {
+        synchronized (splits) {
+          splits.addAll(localListS);
+        }
       }
-    }
-  }
+      if (localListF != null) {
+        synchronized (splitFutures) {
+          splitFutures.addAll(localListF);
+        }
+      }
+    }
+  }
 
   /**
    * BI strategy is used when the requirement is to spend less time in split generation
@@ -1018,7 +1028,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         try {
           return SHIMS.listLocatedHdfsStatus(fs, base, AcidUtils.hiddenFileFilter);
         } catch (Throwable t) {
-          LOG.error("Failed to get files with ID; using regular API", t);
+          LOG.error("Failed to get files with ID; using regular API: " + t.getMessage());
         }
       }
 
@@ -1055,6 +1065,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     private OrcFile.WriterVersion writerVersion;
     private long projColsUncompressedSize;
     private final List<OrcSplit> deltaSplits;
+    private final ByteBuffer ppdResult;
     private final UserGroupInformation ugi;
     private final boolean allowSyntheticFileIds;
 
@@ -1075,6 +1086,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
       this.projColsUncompressedSize = -1;
       this.deltaSplits = splitInfo.getSplits();
       this.allowSyntheticFileIds = allowSyntheticFileIds;
+      this.ppdResult = splitInfo.ppdResult;
+    }
+
+    public boolean isBlocking() {
+      return ppdResult != null;
     }
 
     Path getPath() {
@@ -1182,6 +1198,20 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           fileMetaInfo, isOriginal, hasBase, deltas, scaledProjSize);
     }
 
+    private static final class OffsetAndLength { // Java cruft; pair of long.
+      public OffsetAndLength() {
+        this.offset = -1;
+        this.length = 0;
+      }
+
+      long offset, length;
+
+      @Override
+      public String toString() {
+        return "[offset=" + offset + ", length=" + length + "]";
+      }
+    }
+
     /**
      * Divide the adjacent stripes in the file into input splits based on the
      * block size and the configured minimum and maximum sizes.
@@ -1204,74 +1234,122 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
 
     private List<OrcSplit> callInternal() throws IOException {
-      populateAndCacheStripeDetails();
-      List<OrcSplit> splits = Lists.newArrayList();
-
-      // figure out which stripes we need to read
-      boolean[] includeStripe = null;
+      // Figure out which stripes we need to read.
+      if (ppdResult != null) {
+        assert deltaSplits.isEmpty();
+        assert ppdResult.hasArray();
+
+        // TODO: when PB is upgraded to 2.6, newInstance(ByteBuffer) method should be used here.
+        CodedInputStream cis = CodedInputStream.newInstance(
+            ppdResult.array(), ppdResult.arrayOffset(), ppdResult.remaining());
+        cis.setSizeLimit(InStream.PROTOBUF_MESSAGE_MAX_LIMIT);
+        return generateSplitsFromPpd(SplitInfos.parseFrom(cis));
+      } else {
+        populateAndCacheStripeDetails();
+        boolean[] includeStripe = null;
+        // We can't eliminate stripes if there are deltas because the
+        // deltas may change the rows making them match the predicate.
+        if ((deltas == null || deltas.isEmpty()) && context.sarg != null) {
+          String[] colNames = extractNeededColNames(types, context.conf, includedCols, isOriginal);
+          if (colNames == null) {
+            LOG.warn("Skipping split elimination for {} as column names are null", file.getPath());
+          } else {
+            includeStripe = pickStripes(context.sarg, colNames, writerVersion, isOriginal,
+                stripeStats, stripes.size(), file.getPath());
+          }
+        }
+        return generateSplitsFromStripes(includeStripe);
+      }
+    }
 
-      // we can't eliminate stripes if there are deltas because the
-      // deltas may change the rows making them match the predicate.
-      if ((deltas == null || deltas.isEmpty()) && context.sarg != null) {
-        String[] colNames = extractNeededColNames(types, context.conf, includedCols, isOriginal);
-        if (colNames == null) {
-          LOG.warn("Skipping split elimination for {} as column names is null", file.getPath());
-        } else {
-          includeStripe = pickStripes(context.sarg, colNames, writerVersion, isOriginal,
-              stripeStats, stripes.size(), file.getPath());
+    private List<OrcSplit> generateSplitsFromPpd(SplitInfos ppdResult) throws IOException {
+      OffsetAndLength current = new OffsetAndLength();
+      List<OrcSplit> splits = new ArrayList<>(ppdResult.getInfosCount());
+      int lastIdx = -1;
+      for (Metastore.SplitInfo si : ppdResult.getInfosList()) {
+        int index = si.getIndex();
+        if (lastIdx >= 0 && lastIdx + 1 != index && current.offset != -1) {
+          // Create split for the previous unfinished stripe.
+          splits.add(createSplit(current.offset, current.length, null));
+          current.offset = -1;
+        }
+        lastIdx = index;
+        String debugStr = null;
+        if (LOG.isDebugEnabled()) {
+          debugStr = current.toString();
+        }
+        current = generateOrUpdateSplit(splits, current, si.getOffset(), si.getLength(), null);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Updated split from {" + index + ": " + si.getOffset() + ", "
+              + si.getLength() + "} and " + debugStr + " to " + current);
         }
       }
+      generateLastSplit(splits, current, null);
+      return splits;
+    }
 
+    private List<OrcSplit> generateSplitsFromStripes(boolean[] includeStripe) throws IOException {
+      List<OrcSplit> splits = new ArrayList<>(stripes.size());
       // if we didn't have predicate pushdown, read everything
       if (includeStripe == null) {
         includeStripe = new boolean[stripes.size()];
         Arrays.fill(includeStripe, true);
       }
 
-      long currentOffset = -1;
-      long currentLength = 0;
+      OffsetAndLength current = new OffsetAndLength();
       int idx = -1;
       for (StripeInformation stripe : stripes) {
         idx++;
 
         if (!includeStripe[idx]) {
           // create split for the previous unfinished stripe
-          if (currentOffset != -1) {
-            splits.add(createSplit(currentOffset, currentLength, fileMetaInfo));
-            currentOffset = -1;
+          if (current.offset != -1) {
+            splits.add(createSplit(current.offset, current.length, fileMetaInfo));
+            current.offset = -1;
           }
           continue;
         }
 
-        // if we are working on a stripe, over the min stripe size, and
-        // crossed a block boundary, cut the input split here.
-        if (currentOffset != -1 && currentLength > context.minSize &&
-            (currentOffset / blockSize != stripe.getOffset() / blockSize)) {
-          splits.add(createSplit(currentOffset, currentLength, fileMetaInfo));
-          currentOffset = -1;
-        }
-        // if we aren't building a split, start a new one.
-        if (currentOffset == -1) {
-          currentOffset = stripe.getOffset();
-          currentLength = stripe.getLength();
-        } else {
-          currentLength =
-              (stripe.getOffset() + stripe.getLength()) - currentOffset;
-        }
-        if (currentLength >= context.maxSize) {
-          splits.add(createSplit(currentOffset, currentLength, fileMetaInfo));
-          currentOffset = -1;
-        }
-      }
-      if (currentOffset != -1) {
-        splits.add(createSplit(currentOffset, currentLength, fileMetaInfo));
+        current = generateOrUpdateSplit(
+            splits, current, stripe.getOffset(), stripe.getLength(), fileMetaInfo);
       }
+      generateLastSplit(splits, current, fileMetaInfo);
 
-      // add uncovered ACID delta splits
+      // Add uncovered ACID delta splits.
       splits.addAll(deltaSplits);
       return splits;
     }
 
+    private OffsetAndLength generateOrUpdateSplit(
+        List<OrcSplit> splits, OffsetAndLength current, long offset,
+        long length, FileMetaInfo fileMetaInfo) throws IOException {
+      // if we are working on a stripe, over the min stripe size, and
+      // crossed a block boundary, cut the input split here.
+      if (current.offset != -1 && current.length > context.minSize &&
+          (current.offset / blockSize != offset / blockSize)) {
+        splits.add(createSplit(current.offset, current.length, fileMetaInfo));
+        current.offset = -1;
+      }
+      // if we aren't building a split, start a new one.
+      if (current.offset == -1) {
+        current.offset = offset;
+        current.length = length;
+      } else {
+        current.length = (offset + length) - current.offset;
+      }
+      if (current.length >= context.maxSize) {
+        splits.add(createSplit(current.offset, current.length, fileMetaInfo));
+        current.offset = -1;
+      }
+      return current;
+    }
+
+    private void generateLastSplit(List<OrcSplit> splits, OffsetAndLength current,
+        FileMetaInfo fileMetaInfo) throws IOException {
+      if (current.offset == -1) return;
+      splits.add(createSplit(current.offset, current.length, fileMetaInfo));
+    }
+
     private void populateAndCacheStripeDetails() throws IOException {
       // Only create OrcReader if we are missing some information.
       List<OrcProto.ColumnStatistics> colStatsLocal;
@@ -1290,7 +1368,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
           assert fileInfo.stripeStats != null && fileInfo.types != null
               && fileInfo.writerVersion != null;
           // We assume that if we needed to create a reader, we need to cache it to meta cache.
-          // TODO: This will also needlessly overwrite it in local cache for now.
+          // This will also needlessly overwrite it in local cache for now.
           context.footerCache.put(fsFileId, file, fileInfo.fileMetaInfo, orcReader);
         }
       } else {
@@ -1330,10 +1408,6 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     }
   }
 
-  static List<OrcSplit> generateSplitsInfo(Configuration conf)
-      throws IOException {
-    return generateSplitsInfo(conf, -1);
-  }
 
   /** Class intended to update two values from methods... Java-related cruft. */
   @VisibleForTesting
@@ -1342,14 +1416,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     long combineStartUs;
   }
 
-  static List<OrcSplit> generateSplitsInfo(Configuration conf, int numSplits)
+  static List<OrcSplit> generateSplitsInfo(Configuration conf, Context context)
       throws IOException {
-    // Use threads to resolve directories into splits.
-    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED)) {
-      // Create HiveConf once, since this is expensive.
-      conf = new HiveConf(conf, OrcInputFormat.class);
-    }
-    Context context = new Context(conf, numSplits);
     if (LOG.isInfoEnabled()) {
       LOG.info("ORC pushdown predicate: " + context.sarg);
     }
@@ -1391,7 +1459,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         if (adi == null) {
           // We were combining SS-es and the time has expired.
           assert combinedCtx.combined != null;
-          scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures);
+          scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures, splits);
           combinedCtx.combined = null;
           continue;
         }
@@ -1409,7 +1477,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         // Hack note - different split strategies return differently typed lists, yay Java.
         // This works purely by magic, because we know which strategy produces which type.
         if (splitStrategy instanceof ETLSplitStrategy) {
-          scheduleSplits((ETLSplitStrategy)splitStrategy, context, splitFutures, strategyFutures);
+          scheduleSplits((ETLSplitStrategy)splitStrategy,
+              context, splitFutures, strategyFutures, splits);
         } else {
           @SuppressWarnings("unchecked")
           List<OrcSplit> readySplits = (List<OrcSplit>)splitStrategy.getSplits();
@@ -1419,7 +1488,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
 
       // Run the last combined strategy, if any.
       if (combinedCtx != null && combinedCtx.combined != null) {
-        scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures);
+        scheduleSplits(combinedCtx.combined, context, splitFutures, strategyFutures, splits);
         combinedCtx.combined = null;
       }
 
@@ -1452,10 +1521,18 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     return splits;
   }
 
+  @VisibleForTesting
+  // We could have this as a protected method w/no class, but half of Hive is static, so there.
+  public static class ContextFactory {
+    public Context create(Configuration conf, int numSplits) {
+      return new Context(conf, numSplits);
+    }
+  }
+
   private static void scheduleSplits(ETLSplitStrategy splitStrategy, Context context,
-      List<Future<List<OrcSplit>>> splitFutures, List<Future<Void>> strategyFutures)
-          throws IOException {
-    Future<Void> ssFuture = splitStrategy.generateSplitWork(context, splitFutures);
+      List<Future<List<OrcSplit>>> splitFutures, List<Future<Void>> strategyFutures,
+      List<OrcSplit> splits) throws IOException {
+    Future<Void> ssFuture = splitStrategy.generateSplitWork(context, splitFutures, splits);
     if (ssFuture == null) return;
     strategyFutures.add(ssFuture);
   }
@@ -1504,7 +1581,13 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     if (isDebugEnabled) {
       LOG.debug("getSplits started");
     }
-    List<OrcSplit> result = generateSplitsInfo(job, numSplits);
+    Configuration conf = job;
+    if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED)) {
+      // Create HiveConf once, since this is expensive.
+      conf = new HiveConf(conf, OrcInputFormat.class);
+    }
+    List<OrcSplit> result = generateSplitsInfo(conf,
+        new Context(conf, numSplits, createExternalCaches()));
     if (isDebugEnabled) {
       LOG.debug("getSplits finished");
     }
@@ -1517,10 +1600,10 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
    * Stores information relevant to split generation for an ORC File.
    *
    */
-  private static class FileInfo {
-    private final long modificationTime;
-    private final long size;
-    private final Long fileId;
+  static class FileInfo {
+    final long modificationTime;
+    final long size;
+    final Long fileId;
     private final List<StripeInformation> stripeInfos;
     private FileMetaInfo fileMetaInfo;
     private final List<StripeStatistics> stripeStats;
@@ -1898,196 +1981,16 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
    * Represents footer cache.
    */
   public interface FooterCache {
-    FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) throws IOException;
+    static final ByteBuffer NO_SPLIT_AFTER_PPD = ByteBuffer.wrap(new byte[0]);
+
+    void getAndValidate(List<HdfsFileStatusWithId> files, boolean isOriginal,
+        FileInfo[] result, ByteBuffer[] ppdResult) throws IOException, HiveException;
+    boolean hasPpd();
     boolean isBlocking();
     void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
         throws IOException;
   }
 
-  /** Local footer cache using Guava. Stores convoluted Java objects. */
-  private static class LocalCache implements FooterCache {
-    private final Cache<Path, FileInfo> cache;
-
-    public LocalCache(int numThreads, int cacheStripeDetailsSize) {
-      cache = CacheBuilder.newBuilder()
-        .concurrencyLevel(numThreads)
-        .initialCapacity(cacheStripeDetailsSize)
-        .maximumSize(cacheStripeDetailsSize)
-        .softValues()
-        .build();
-    }
-
-    @Override
-    public FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) {
-      // TODO: should local cache also be by fileId? Preserve the original logic for now.
-      FileInfo[] result = new FileInfo[files.size()];
-      int i = -1;
-      for (HdfsFileStatusWithId fileWithId : files) {
-        ++i;
-        FileStatus file = fileWithId.getFileStatus();
-        Path path = file.getPath();
-        Long fileId = fileWithId.getFileId();
-        FileInfo fileInfo = cache.getIfPresent(path);
-        if (isDebugEnabled) {
-          LOG.debug("Info " + (fileInfo == null ? "not " : "") + "cached for path: " + path);
-        }
-        if (fileInfo == null) continue;
-        if ((fileId != null && fileInfo.fileId != null && fileId == fileInfo.fileId)
-            || (fileInfo.modificationTime == file.getModificationTime() &&
-            fileInfo.size == file.getLen())) {
-          result[i] = fileInfo;
-          continue;
-        }
-        // Invalidate
-        cache.invalidate(path);
-        if (isDebugEnabled) {
-          LOG.debug("Meta-Info for : " + path + " changed. CachedModificationTime: "
-              + fileInfo.modificationTime + ", CurrentModificationTime: "
-              + file.getModificationTime() + ", CachedLength: " + fileInfo.size
-              + ", CurrentLength: " + file.getLen());
-        }
-      }
-      return result;
-    }
-
-    public void put(Path path, FileInfo fileInfo) {
-      cache.put(path, fileInfo);
-    }
-
-    @Override
-    public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
-        throws IOException {
-      cache.put(file.getPath(), new FileInfo(file.getModificationTime(), file.getLen(),
-          orcReader.getStripes(), orcReader.getStripeStatistics(), orcReader.getTypes(),
-          orcReader.getOrcProtoFileStatistics(), fileMetaInfo, orcReader.getWriterVersion(),
-          fileId));
-    }
-
-    @Override
-    public boolean isBlocking() {
-      return false;
-    }
-  }
-
-  /** Metastore-based footer cache storing serialized footers. Also has a local cache. */
-  public static class MetastoreCache implements FooterCache {
-    private final LocalCache localCache;
-    private boolean isWarnLogged = false;
-    private HiveConf conf;
-
-    public MetastoreCache(LocalCache lc) {
-      localCache = lc;
-    }
-
-    @Override
-    public FileInfo[] getAndValidate(List<HdfsFileStatusWithId> files) throws IOException {
-      // First, check the local cache.
-      FileInfo[] result = localCache.getAndValidate(files);
-      assert result.length == files.size();
-      // This is an unfortunate consequence of batching/iterating thru MS results.
-      // TODO: maybe have a direct map call for small lists if this becomes a perf issue.
-      HashMap<Long, Integer> posMap = new HashMap<>(files.size());
-      for (int i = 0; i < result.length; ++i) {
-        if (result[i] != null) continue;
-        HdfsFileStatusWithId file = files.get(i);
-        Long fileId = file.getFileId();
-        if (fileId == null) {
-          if (!isWarnLogged || isDebugEnabled) {
-            LOG.warn("Not using metastore cache because fileId is missing: "
-                + file.getFileStatus().getPath());
-            isWarnLogged = true;
-          }
-          continue;
-        }
-        posMap.put(fileId, i);
-      }
-      Iterator<Entry<Long, ByteBuffer>> iter = null;
-      Hive hive;
-      try {
-        hive = getHive();
-        iter = hive.getFileMetadata(Lists.newArrayList(posMap.keySet()), conf).iterator();
-      } catch (HiveException ex) {
-        throw new IOException(ex);
-      }
-      List<Long> corruptIds = null;
-      while (iter.hasNext()) {
-        Entry<Long, ByteBuffer> e = iter.next();
-        int ix = posMap.get(e.getKey());
-        assert result[ix] == null;
-        HdfsFileStatusWithId file = files.get(ix);
-        assert file.getFileId() == e.getKey();
-        result[ix] = createFileInfoFromMs(file, e.getValue());
-        if (result[ix] == null) {
-          if (corruptIds == null) {
-            corruptIds = new ArrayList<>();
-          }
-          corruptIds.add(file.getFileId());
-        } else {
-          localCache.put(file.getFileStatus().getPath(), result[ix]);
-        }
-      }
-      if (corruptIds != null) {
-        try {
-          hive.clearFileMetadata(corruptIds);
-        } catch (HiveException ex) {
-          LOG.error("Failed to clear corrupt cache data", ex);
-        }
-      }
-      return result;
-    }
-
-    private Hive getHive() throws HiveException {
-      // TODO: we wish we could cache the Hive object, but it's not thread safe, and each
-      //       threadlocal we "cache" would need to be reinitialized for every query. This is
-      //       a huge PITA. Hive object will be cached internally, but the compat check will be
-      //       done every time inside get().
-      return Hive.getWithFastCheck(conf);
-    }
-
-    private static FileInfo createFileInfoFromMs(
-        HdfsFileStatusWithId file, ByteBuffer bb) throws IOException {
-      FileStatus fs = file.getFileStatus();
-      ReaderImpl.FooterInfo fi = null;
-      ByteBuffer original = bb.duplicate();
-      try {
-        fi = ReaderImpl.extractMetaInfoFromFooter(bb, fs.getPath());
-      } catch (Exception ex) {
-        byte[] data = new byte[original.remaining()];
-        System.arraycopy(original.array(), original.arrayOffset() + original.position(),
-            data, 0, data.length);
-        String msg = "Failed to parse the footer stored in cache for file ID "
-            + file.getFileId() + " " + original + " [ " + Hex.encodeHexString(data) + " ]";
-        LOG.error(msg, ex);
-        return null;
-      }
-      return new FileInfo(fs.getModificationTime(), fs.getLen(), fi.getStripes(), fi.getMetadata(),
-          fi.getFooter().getTypesList(), fi.getFooter().getStatisticsList(), fi.getFileMetaInfo(),
-          fi.getFileMetaInfo().writerVersion, file.getFileId());
-    }
-
-    @Override
-    public void put(Long fileId, FileStatus file, FileMetaInfo fileMetaInfo, Reader orcReader)
-        throws IOException {
-      localCache.put(fileId, file, fileMetaInfo, orcReader);
-      if (fileId != null) {
-        try {
-          getHive().putFileMetadata(Lists.newArrayList(fileId),
-              Lists.newArrayList(((ReaderImpl)orcReader).getSerializedFileFooter()));
-        } catch (HiveException e) {
-          throw new IOException(e);
-        }
-      }
-    }
-
-    public void configure(HiveConf queryConfig) {
-      this.conf = queryConfig;
-    }
-
-    @Override
-    public boolean isBlocking() {
-      return true;
-    }
-  }
   /**
    * Convert a Hive type property string that contains separated type names into a list of
    * TypeDescription objects.
@@ -2283,4 +2186,8 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
     return result;
   }
 
+  @VisibleForTesting
+  protected ExternalFooterCachesByConf createExternalCaches() {
+    return null; // Null means the default caches are created; tests override this.
+  }
 }

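The generateOrUpdateSplit/generateLastSplit refactor above centralizes the stripe-combining rules that were previously inlined in the stripe loop: cut the running split when it is over the minimum size and crosses a block boundary, flush it when it reaches the maximum size, and emit whatever remains at the end. A self-contained sketch of just those rules; BLOCK/MIN/MAX and the sample stripe offsets are arbitrary stand-ins for the Context fields and real stripe metadata.

import java.util.ArrayList;
import java.util.List;

public class SplitCombineSketch {
  static final long BLOCK = 256, MIN = 100, MAX = 1000; // hypothetical config values

  public static void main(String[] args) {
    long[][] stripes = { {0, 120}, {120, 120}, {240, 200}, {700, 900} }; // {offset, length}
    List<long[]> splits = new ArrayList<>();
    long offset = -1, length = 0;
    for (long[] s : stripes) {
      // Over the min size and crossing a block boundary: cut the input split here.
      if (offset != -1 && length > MIN && offset / BLOCK != s[0] / BLOCK) {
        splits.add(new long[] { offset, length });
        offset = -1;
      }
      if (offset == -1) {        // not building a split: start a new one
        offset = s[0];
        length = s[1];
      } else {                   // extend the current split to cover this stripe
        length = (s[0] + s[1]) - offset;
      }
      if (length >= MAX) {       // hard cap on split size
        splits.add(new long[] { offset, length });
        offset = -1;
      }
    }
    if (offset != -1) {          // last unfinished split, as generateLastSplit() does
      splits.add(new long[] { offset, length });
    }
    for (long[] sp : splits) {
      System.out.println("split at " + sp[0] + ", length " + sp[1]);
    }
  }
}
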
http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
index 2782d7e..c4a7226 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java
@@ -25,6 +25,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
@@ -121,9 +123,8 @@ public class OrcNewInputFormat extends InputFormat<NullWritable, OrcStruct>{
     if (LOG.isDebugEnabled()) {
       LOG.debug("getSplits started");
     }
-    List<OrcSplit> splits =
-        OrcInputFormat.generateSplitsInfo(ShimLoader.getHadoopShims()
-        .getConfiguration(jobContext));
+    Configuration conf = ShimLoader.getHadoopShims().getConfiguration(jobContext);
+    List<OrcSplit> splits = OrcInputFormat.generateSplitsInfo(conf, createContext(conf, -1));
     List<InputSplit> result = new ArrayList<InputSplit>(splits.size());
     for(OrcSplit split: splits) {
       result.add(new OrcNewSplit(split));
@@ -134,4 +135,13 @@ public class OrcNewInputFormat extends InputFormat<NullWritable, OrcStruct>{
     return result;
   }
 
+  // Nearly C/P from OrcInputFormat; there are too many statics everywhere to sort this out.
+  private Context createContext(Configuration conf, int numSplits) {
+    // Use threads to resolve directories into splits.
+    if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED)) {
+      // Create HiveConf once, since this is expensive.
+      conf = new HiveConf(conf, OrcInputFormat.class);
+    }
+    return new Context(conf, numSplits, null);
+  }
 }


[06/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
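
The renamed generated structs below correspond to new metastore service calls: add_token(token_identifier, delegation_token) returning bool, remove_token(token_identifier) returning bool, and get_token(token_identifier). A hedged Java sketch of driving them through the generated Thrift client; the host/port, token values, and plain-socket transport are placeholders (a real secure metastore would need SASL setup), and this assumes the generated Java client keeps the IDL method names, as Thrift normally does.

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class TokenStoreSmokeTest {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("metastore-host", 9083); // placeholder endpoint
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Mirrors the add_token/get_token/remove_token signatures in the generated code below.
    boolean added = client.add_token("tokenId-1", "base64-encoded-token");
    String fetched = client.get_token("tokenId-1"); // serialized token, per DBTokenStore usage
    boolean removed = client.remove_token("tokenId-1");
    System.out.println(added + " / " + fetched + " / " + removed);

    transport.close();
  }
}
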
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index 18cb41a..6e5de20 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -29527,11 +29527,11 @@ uint32_t ThriftHiveMetastore_cancel_delegation_token_presult::read(::apache::thr
 }
 
 
-ThriftHiveMetastore_get_open_txns_args::~ThriftHiveMetastore_get_open_txns_args() throw() {
+ThriftHiveMetastore_add_token_args::~ThriftHiveMetastore_add_token_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_add_token_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29550,7 +29550,28 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    xfer += iprot->skip(ftype);
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->token_identifier);
+          this->__isset.token_identifier = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->delegation_token);
+          this->__isset.delegation_token = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
     xfer += iprot->readFieldEnd();
   }
 
@@ -29559,10 +29580,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_add_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_token_args");
+
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->token_identifier);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("delegation_token", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->delegation_token);
+  xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -29570,14 +29599,22 @@ uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protoco
 }
 
 
-ThriftHiveMetastore_get_open_txns_pargs::~ThriftHiveMetastore_get_open_txns_pargs() throw() {
+ThriftHiveMetastore_add_token_pargs::~ThriftHiveMetastore_add_token_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_add_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_token_pargs");
+
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->token_identifier)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("delegation_token", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->delegation_token)));
+  xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -29585,11 +29622,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protoc
 }
 
 
-ThriftHiveMetastore_get_open_txns_result::~ThriftHiveMetastore_get_open_txns_result() throw() {
+ThriftHiveMetastore_add_token_result::~ThriftHiveMetastore_add_token_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_add_token_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29611,8 +29648,8 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protoc
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->success);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -29630,15 +29667,15 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protoc
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_add_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_token_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0);
+    xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -29647,11 +29684,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::proto
 }
 
 
-ThriftHiveMetastore_get_open_txns_presult::~ThriftHiveMetastore_get_open_txns_presult() throw() {
+ThriftHiveMetastore_add_token_presult::~ThriftHiveMetastore_add_token_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_add_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29673,8 +29710,8 @@ uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::proto
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool((*(this->success)));
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -29693,11 +29730,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::proto
 }
 
 
-ThriftHiveMetastore_get_open_txns_info_args::~ThriftHiveMetastore_get_open_txns_info_args() throw() {
+ThriftHiveMetastore_remove_token_args::~ThriftHiveMetastore_remove_token_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_token_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29716,7 +29753,20 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::pro
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    xfer += iprot->skip(ftype);
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->token_identifier);
+          this->__isset.token_identifier = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
     xfer += iprot->readFieldEnd();
   }
 
@@ -29725,10 +29775,14 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::pro
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_token_args");
+
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->token_identifier);
+  xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -29736,14 +29790,18 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::pr
 }
 
 
-ThriftHiveMetastore_get_open_txns_info_pargs::~ThriftHiveMetastore_get_open_txns_info_pargs() throw() {
+ThriftHiveMetastore_remove_token_pargs::~ThriftHiveMetastore_remove_token_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_token_pargs");
+
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->token_identifier)));
+  xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -29751,11 +29809,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::p
 }
 
 
-ThriftHiveMetastore_get_open_txns_info_result::~ThriftHiveMetastore_get_open_txns_info_result() throw() {
+ThriftHiveMetastore_remove_token_result::~ThriftHiveMetastore_remove_token_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_token_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29777,8 +29835,8 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::p
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->success);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -29796,15 +29854,15 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::p
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_token_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0);
+    xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -29813,11 +29871,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift::
 }
 
 
-ThriftHiveMetastore_get_open_txns_info_presult::~ThriftHiveMetastore_get_open_txns_info_presult() throw() {
+ThriftHiveMetastore_remove_token_presult::~ThriftHiveMetastore_remove_token_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29839,8 +29897,8 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool((*(this->success)));
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -29859,11 +29917,11 @@ uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::
 }
 
 
-ThriftHiveMetastore_open_txns_args::~ThriftHiveMetastore_open_txns_args() throw() {
+ThriftHiveMetastore_get_token_args::~ThriftHiveMetastore_get_token_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_token_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29885,9 +29943,9 @@ uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TP
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->token_identifier);
+          this->__isset.token_identifier = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -29904,13 +29962,13 @@ uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TP
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_token_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_token_args");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->token_identifier);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -29919,17 +29977,17 @@ uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::T
 }
 
 
-ThriftHiveMetastore_open_txns_pargs::~ThriftHiveMetastore_open_txns_pargs() throw() {
+ThriftHiveMetastore_get_token_pargs::~ThriftHiveMetastore_get_token_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_token_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_token_pargs");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldBegin("token_identifier", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->token_identifier)));
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -29938,11 +29996,11 @@ uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::
 }
 
 
-ThriftHiveMetastore_open_txns_result::~ThriftHiveMetastore_open_txns_result() throw() {
+ThriftHiveMetastore_get_token_result::~ThriftHiveMetastore_get_token_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_token_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -29964,8 +30022,8 @@ uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->success);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -29983,15 +30041,15 @@ uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_token_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_token_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+    xfer += oprot->writeString(this->success);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -30000,11 +30058,11 @@ uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_open_txns_presult::~ThriftHiveMetastore_open_txns_presult() throw() {
+ThriftHiveMetastore_get_token_presult::~ThriftHiveMetastore_get_token_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_token_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30026,8 +30084,8 @@ uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol:
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString((*(this->success)));
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -30046,11 +30104,69 @@ uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_abort_txn_args::~ThriftHiveMetastore_abort_txn_args() throw() {
+ThriftHiveMetastore_get_all_token_identifiers_args::~ThriftHiveMetastore_get_all_token_identifiers_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_token_identifiers_args");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_token_identifiers_pargs::~ThriftHiveMetastore_get_all_token_identifiers_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_token_identifiers_pargs");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_token_identifiers_result::~ThriftHiveMetastore_get_all_token_identifiers_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30071,10 +30187,22 @@ uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TP
     }
     switch (fid)
     {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size1289;
+            ::apache::thrift::protocol::TType _etype1292;
+            xfer += iprot->readListBegin(_etype1292, _size1289);
+            this->success.resize(_size1289);
+            uint32_t _i1293;
+            for (_i1293 = 0; _i1293 < _size1289; ++_i1293)
+            {
+              xfer += iprot->readString(this->success[_i1293]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -30091,45 +30219,94 @@ uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TP
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
   uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_args");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_token_identifiers_result");
 
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+      std::vector<std::string> ::const_iterator _iter1294;
+      for (_iter1294 = this->success.begin(); _iter1294 != this->success.end(); ++_iter1294)
+      {
+        xfer += oprot->writeString((*_iter1294));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
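
get_all_token_identifiers and get_master_keys both return list<string>, so their result/presult readers materialize a length-prefixed list: the element count arrives first, the vector is resized up front, then elements are read in order — exactly the _size/_etype/_i locals above. Isolating just that list encoding in a hand-written sketch (same Thrift 0.9.x API assumption as the earlier aside; the payload is illustrative):

#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>
#include <boost/shared_ptr.hpp>
#include <vector>
#include <iostream>

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;

int main() {
  boost::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol prot(buf);

  // Write side: the list<string> body that the "success" field (id 0) of
  // get_all_token_identifiers_result::write() carries.
  std::vector<std::string> ids;  // sample payload
  ids.push_back("id-1");
  ids.push_back("id-2");
  prot.writeListBegin(T_STRING, static_cast<uint32_t>(ids.size()));
  for (size_t i = 0; i < ids.size(); ++i) prot.writeString(ids[i]);
  prot.writeListEnd();

  // Read side: size first, resize, then read elements in order,
  // mirroring the generated read() loop.
  TType etype;
  uint32_t size = 0;
  prot.readListBegin(etype, size);
  std::vector<std::string> out(size);
  for (uint32_t i = 0; i < size; ++i) prot.readString(out[i]);
  prot.readListEnd();
  std::cout << out.size() << " identifiers" << std::endl;  // prints 2 identifiers
  return 0;
}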
 
 
-ThriftHiveMetastore_abort_txn_pargs::~ThriftHiveMetastore_abort_txn_pargs() throw() {
+ThriftHiveMetastore_get_all_token_identifiers_presult::~ThriftHiveMetastore_get_all_token_identifiers_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_pargs");
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size1295;
+            ::apache::thrift::protocol::TType _etype1298;
+            xfer += iprot->readListBegin(_etype1298, _size1295);
+            (*(this->success)).resize(_size1295);
+            uint32_t _i1299;
+            for (_i1299 = 0; _i1299 < _size1295; ++_i1299)
+            {
+              xfer += iprot->readString((*(this->success))[_i1299]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
 
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
   return xfer;
 }
 
 
-ThriftHiveMetastore_abort_txn_result::~ThriftHiveMetastore_abort_txn_result() throw() {
+ThriftHiveMetastore_add_master_key_args::~ThriftHiveMetastore_add_master_key_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_add_master_key_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30151,6 +30328,93 @@ uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::
     switch (fid)
     {
       case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->key);
+          this->__isset.key = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_add_master_key_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_master_key_args");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->key);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_add_master_key_pargs::~ThriftHiveMetastore_add_master_key_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_add_master_key_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_master_key_pargs");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->key)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_add_master_key_result::~ThriftHiveMetastore_add_master_key_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_add_master_key_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->success);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
           xfer += this->o1.read(iprot);
           this->__isset.o1 = true;
@@ -30170,13 +30434,17 @@ uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_add_master_key_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_add_master_key_result");
 
-  if (this->__isset.o1) {
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_I32, 0);
+    xfer += oprot->writeI32(this->success);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
     xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
     xfer += this->o1.write(oprot);
     xfer += oprot->writeFieldEnd();
@@ -30187,11 +30455,11 @@ uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol:
 }
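
The result structs encode Thrift's return convention: field id 0 carries the return value and positive ids carry declared exceptions, with the __isset flags selecting exactly one branch at write time — hence the success/o1 if/else chain above. The parallel p-variants (pargs/presult) hold pointers into caller-owned storage rather than copies, which is why their bodies dereference, e.g. (*(this->key)). A toy mirror of the branch selection, as a sketch only — the names and the string stand-in are illustrative, not the generated types:

#include <stdint.h>
#include <string>

// Shape of a generated result struct, reduced to the decision that matters.
struct result_like {
  int32_t success;                        // field id 0: the return value
  std::string o1;                         // field id 1: a declared exception (stand-in)
  struct { bool success; bool o1; } __isset;
};

// write() serializes exactly one branch, picked by the __isset flags.
const char* branch_to_send(const result_like& r) {
  if (r.__isset.success) return "success (T_I32, field 0)";
  if (r.__isset.o1)      return "o1 (T_STRUCT, field 1)";
  return "no field (void result)";
}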
 
 
-ThriftHiveMetastore_abort_txn_presult::~ThriftHiveMetastore_abort_txn_presult() throw() {
+ThriftHiveMetastore_add_master_key_presult::~ThriftHiveMetastore_add_master_key_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_add_master_key_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30212,6 +30480,14 @@ uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol:
     }
     switch (fid)
     {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32((*(this->success)));
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
           xfer += this->o1.read(iprot);
@@ -30233,11 +30509,11 @@ uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_commit_txn_args::~ThriftHiveMetastore_commit_txn_args() throw() {
+ThriftHiveMetastore_update_master_key_args::~ThriftHiveMetastore_update_master_key_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_update_master_key_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30259,9 +30535,17 @@ uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::T
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->seq_number);
+          this->__isset.seq_number = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->key);
+          this->__isset.key = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -30278,13 +30562,17 @@ uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::T
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_update_master_key_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_master_key_args");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldBegin("seq_number", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32(this->seq_number);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->key);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -30293,17 +30581,21 @@ uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::
 }
 
 
-ThriftHiveMetastore_commit_txn_pargs::~ThriftHiveMetastore_commit_txn_pargs() throw() {
+ThriftHiveMetastore_update_master_key_pargs::~ThriftHiveMetastore_update_master_key_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_update_master_key_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_master_key_pargs");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldBegin("seq_number", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32((*(this->seq_number)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->key)));
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -30312,11 +30604,11 @@ uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_commit_txn_result::~ThriftHiveMetastore_commit_txn_result() throw() {
+ThriftHiveMetastore_update_master_key_result::~ThriftHiveMetastore_update_master_key_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_update_master_key_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30365,11 +30657,11 @@ uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol:
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_update_master_key_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_master_key_result");
 
   if (this->__isset.o1) {
     xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
@@ -30386,11 +30678,11 @@ uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_commit_txn_presult::~ThriftHiveMetastore_commit_txn_presult() throw() {
+ThriftHiveMetastore_update_master_key_presult::~ThriftHiveMetastore_update_master_key_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_update_master_key_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30440,11 +30732,11 @@ uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() {
+ThriftHiveMetastore_remove_master_key_args::~ThriftHiveMetastore_remove_master_key_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_master_key_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30466,9 +30758,9 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->key_seq);
+          this->__isset.key_seq = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -30485,13 +30777,13 @@ uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtoc
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_master_key_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_master_key_args");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32(this->key_seq);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -30500,17 +30792,17 @@ uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProto
 }
 
 
-ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() {
+ThriftHiveMetastore_remove_master_key_pargs::~ThriftHiveMetastore_remove_master_key_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_master_key_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_master_key_pargs");
 
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32((*(this->key_seq)));
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -30519,11 +30811,11 @@ uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProt
 }
 
 
-ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() {
+ThriftHiveMetastore_remove_master_key_result::~ThriftHiveMetastore_remove_master_key_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_master_key_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30545,29 +30837,13 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->success);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -30580,23 +30856,15 @@ uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProt
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_remove_master_key_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_remove_master_key_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
-    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0);
+    xfer += oprot->writeBool(this->success);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -30605,11 +30873,11 @@ uint32_t ThriftHiveMetastore_lock_result::write(::apache::thrift::protocol::TPro
 }
 
 
-ThriftHiveMetastore_lock_presult::~ThriftHiveMetastore_lock_presult() throw() {
+ThriftHiveMetastore_remove_master_key_presult::~ThriftHiveMetastore_remove_master_key_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_remove_master_key_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30631,29 +30899,13 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool((*(this->success)));
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -30667,11 +30919,11 @@ uint32_t ThriftHiveMetastore_lock_presult::read(::apache::thrift::protocol::TPro
 }
 
 
-ThriftHiveMetastore_check_lock_args::~ThriftHiveMetastore_check_lock_args() throw() {
+ThriftHiveMetastore_get_master_keys_args::~ThriftHiveMetastore_get_master_keys_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_master_keys_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30690,20 +30942,7 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
+    xfer += iprot->skip(ftype);
     xfer += iprot->readFieldEnd();
   }
 
@@ -30712,14 +30951,10 @@ uint32_t ThriftHiveMetastore_check_lock_args::read(::apache::thrift::protocol::T
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_master_keys_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_args");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_master_keys_args");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -30727,18 +30962,14 @@ uint32_t ThriftHiveMetastore_check_lock_args::write(::apache::thrift::protocol::
 }
 
 
-ThriftHiveMetastore_check_lock_pargs::~ThriftHiveMetastore_check_lock_pargs() throw() {
+ThriftHiveMetastore_get_master_keys_pargs::~ThriftHiveMetastore_get_master_keys_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_master_keys_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_pargs");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_master_keys_pargs");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -30746,11 +30977,11 @@ uint32_t ThriftHiveMetastore_check_lock_pargs::write(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_check_lock_result::~ThriftHiveMetastore_check_lock_result() throw() {
+ThriftHiveMetastore_get_master_keys_result::~ThriftHiveMetastore_get_master_keys_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30772,37 +31003,25 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol:
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size1300;
+            ::apache::thrift::protocol::TType _etype1303;
+            xfer += iprot->readListBegin(_etype1303, _size1300);
+            this->success.resize(_size1300);
+            uint32_t _i1304;
+            for (_i1304 = 0; _i1304 < _size1300; ++_i1304)
+            {
+              xfer += iprot->readString(this->success[_i1304]);
+            }
+            xfer += iprot->readListEnd();
+          }
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o3.read(iprot);
-          this->__isset.o3 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -30815,27 +31034,23 @@ uint32_t ThriftHiveMetastore_check_lock_result::read(::apache::thrift::protocol:
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_check_lock_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_master_keys_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
-    xfer += this->o2.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o3) {
-    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
-    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+      std::vector<std::string> ::const_iterator _iter1305;
+      for (_iter1305 = this->success.begin(); _iter1305 != this->success.end(); ++_iter1305)
+      {
+        xfer += oprot->writeString((*_iter1305));
+      }
+      xfer += oprot->writeListEnd();
+    }
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -30844,11 +31059,11 @@ uint32_t ThriftHiveMetastore_check_lock_result::write(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_check_lock_presult::~ThriftHiveMetastore_check_lock_presult() throw() {
+ThriftHiveMetastore_get_master_keys_presult::~ThriftHiveMetastore_get_master_keys_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30870,37 +31085,25 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size1306;
+            ::apache::thrift::protocol::TType _etype1309;
+            xfer += iprot->readListBegin(_etype1309, _size1306);
+            (*(this->success)).resize(_size1306);
+            uint32_t _i1310;
+            for (_i1310 = 0; _i1310 < _size1306; ++_i1310)
+            {
+              xfer += iprot->readString((*(this->success))[_i1310]);
+            }
+            xfer += iprot->readListEnd();
+          }
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o3.read(iprot);
-          this->__isset.o3 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -30914,11 +31117,11 @@ uint32_t ThriftHiveMetastore_check_lock_presult::read(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_unlock_args::~ThriftHiveMetastore_unlock_args() throw() {
+ThriftHiveMetastore_get_open_txns_args::~ThriftHiveMetastore_get_open_txns_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -30937,20 +31140,7 @@ uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
+    xfer += iprot->skip(ftype);
     xfer += iprot->readFieldEnd();
   }
 
@@ -30959,14 +31149,10 @@ uint32_t ThriftHiveMetastore_unlock_args::read(::apache::thrift::protocol::TProt
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_args");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_args");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -30974,18 +31160,14 @@ uint32_t ThriftHiveMetastore_unlock_args::write(::apache::thrift::protocol::TPro
 }
 
 
-ThriftHiveMetastore_unlock_pargs::~ThriftHiveMetastore_unlock_pargs() throw() {
+ThriftHiveMetastore_get_open_txns_pargs::~ThriftHiveMetastore_get_open_txns_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_pargs");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_pargs");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -30993,11 +31175,11 @@ uint32_t ThriftHiveMetastore_unlock_pargs::write(::apache::thrift::protocol::TPr
 }
 
 
-ThriftHiveMetastore_unlock_result::~ThriftHiveMetastore_unlock_result() throw() {
+ThriftHiveMetastore_get_open_txns_result::~ThriftHiveMetastore_get_open_txns_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31018,18 +31200,10 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr
     }
     switch (fid)
     {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
+      case 0:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31046,19 +31220,15 @@ uint32_t ThriftHiveMetastore_unlock_result::read(::apache::thrift::protocol::TPr
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_unlock_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_result");
 
-  if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
-    xfer += this->o2.write(oprot);
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -31067,11 +31237,11 @@ uint32_t ThriftHiveMetastore_unlock_result::write(::apache::thrift::protocol::TP
 }
 
 
-ThriftHiveMetastore_unlock_presult::~ThriftHiveMetastore_unlock_presult() throw() {
+ThriftHiveMetastore_get_open_txns_presult::~ThriftHiveMetastore_get_open_txns_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31092,18 +31262,10 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP
     }
     switch (fid)
     {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
+      case 0:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31121,11 +31283,11 @@ uint32_t ThriftHiveMetastore_unlock_presult::read(::apache::thrift::protocol::TP
 }
 
 
-ThriftHiveMetastore_show_locks_args::~ThriftHiveMetastore_show_locks_args() throw() {
+ThriftHiveMetastore_get_open_txns_info_args::~ThriftHiveMetastore_get_open_txns_info_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_info_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31144,20 +31306,7 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->rqst.read(iprot);
-          this->__isset.rqst = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
+    xfer += iprot->skip(ftype);
     xfer += iprot->readFieldEnd();
   }
 
@@ -31166,14 +31315,10 @@ uint32_t ThriftHiveMetastore_show_locks_args::read(::apache::thrift::protocol::T
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_info_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_args");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->rqst.write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_args");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -31181,18 +31326,14 @@ uint32_t ThriftHiveMetastore_show_locks_args::write(::apache::thrift::protocol::
 }
 
 
-ThriftHiveMetastore_show_locks_pargs::~ThriftHiveMetastore_show_locks_pargs() throw() {
+ThriftHiveMetastore_get_open_txns_info_pargs::~ThriftHiveMetastore_get_open_txns_info_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_info_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_pargs");
-
-  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->rqst)).write(oprot);
-  xfer += oprot->writeFieldEnd();
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_pargs");
 
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
@@ -31200,11 +31341,11 @@ uint32_t ThriftHiveMetastore_show_locks_pargs::write(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_show_locks_result::~ThriftHiveMetastore_show_locks_result() throw() {
+ThriftHiveMetastore_get_open_txns_info_result::~ThriftHiveMetastore_get_open_txns_info_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_info_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31245,11 +31386,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::read(::apache::thrift::protocol:
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_open_txns_info_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_locks_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_open_txns_info_result");
 
   if (this->__isset.success) {
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
@@ -31262,11 +31403,11 @@ uint32_t ThriftHiveMetastore_show_locks_result::write(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_show_locks_presult::~ThriftHiveMetastore_show_locks_presult() throw() {
+ThriftHiveMetastore_get_open_txns_info_presult::~ThriftHiveMetastore_get_open_txns_info_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_open_txns_info_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31308,11 +31449,11 @@ uint32_t ThriftHiveMetastore_show_locks_presult::read(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_heartbeat_args::~ThriftHiveMetastore_heartbeat_args() throw() {
+ThriftHiveMetastore_open_txns_args::~ThriftHiveMetastore_open_txns_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_open_txns_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31335,8 +31476,8 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP
     {
       case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->ids.read(iprot);
-          this->__isset.ids = true;
+          xfer += this->rqst.read(iprot);
+          this->__isset.rqst = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31353,13 +31494,13 @@ uint32_t ThriftHiveMetastore_heartbeat_args::read(::apache::thrift::protocol::TP
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_open_txns_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_args");
 
-  xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->ids.write(oprot);
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -31368,17 +31509,17 @@ uint32_t ThriftHiveMetastore_heartbeat_args::write(::apache::thrift::protocol::T
 }
 
 
-ThriftHiveMetastore_heartbeat_pargs::~ThriftHiveMetastore_heartbeat_pargs() throw() {
+ThriftHiveMetastore_open_txns_pargs::~ThriftHiveMetastore_open_txns_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_open_txns_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_pargs");
 
-  xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->ids)).write(oprot);
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -31387,11 +31528,11 @@ uint32_t ThriftHiveMetastore_heartbeat_pargs::write(::apache::thrift::protocol::
 }
 
 
-ThriftHiveMetastore_heartbeat_result::~ThriftHiveMetastore_heartbeat_result() throw() {
+ThriftHiveMetastore_open_txns_result::~ThriftHiveMetastore_open_txns_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_open_txns_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31412,26 +31553,10 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::
     }
     switch (fid)
     {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
+      case 0:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o3.read(iprot);
-          this->__isset.o3 = true;
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31448,23 +31573,15 @@ uint32_t ThriftHiveMetastore_heartbeat_result::read(::apache::thrift::protocol::
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_open_txns_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_open_txns_result");
 
-  if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
-    xfer += this->o2.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o3) {
-    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
-    xfer += this->o3.write(oprot);
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -31473,11 +31590,11 @@ uint32_t ThriftHiveMetastore_heartbeat_result::write(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_heartbeat_presult::~ThriftHiveMetastore_heartbeat_presult() throw() {
+ThriftHiveMetastore_open_txns_presult::~ThriftHiveMetastore_open_txns_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_open_txns_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31498,26 +31615,10 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol:
     }
     switch (fid)
     {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
+      case 0:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o3.read(iprot);
-          this->__isset.o3 = true;
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31535,11 +31636,11 @@ uint32_t ThriftHiveMetastore_heartbeat_presult::read(::apache::thrift::protocol:
 }
 
 
-ThriftHiveMetastore_heartbeat_txn_range_args::~ThriftHiveMetastore_heartbeat_txn_range_args() throw() {
+ThriftHiveMetastore_abort_txn_args::~ThriftHiveMetastore_abort_txn_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_abort_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31562,8 +31663,8 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr
     {
       case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->txns.read(iprot);
-          this->__isset.txns = true;
+          xfer += this->rqst.read(iprot);
+          this->__isset.rqst = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31580,13 +31681,13 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::read(::apache::thrift::pr
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_abort_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_args");
 
-  xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += this->txns.write(oprot);
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->rqst.write(oprot);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -31595,17 +31696,17 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_args::write(::apache::thrift::p
 }
 
 
-ThriftHiveMetastore_heartbeat_txn_range_pargs::~ThriftHiveMetastore_heartbeat_txn_range_pargs() throw() {
+ThriftHiveMetastore_abort_txn_pargs::~ThriftHiveMetastore_abort_txn_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_abort_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_pargs");
 
-  xfer += oprot->writeFieldBegin("txns", ::apache::thrift::protocol::T_STRUCT, 1);
-  xfer += (*(this->txns)).write(oprot);
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->rqst)).write(oprot);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -31614,11 +31715,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_pargs::write(::apache::thrift::
 }
 
 
-ThriftHiveMetastore_heartbeat_txn_range_result::~ThriftHiveMetastore_heartbeat_txn_range_result() throw() {
+ThriftHiveMetastore_abort_txn_result::~ThriftHiveMetastore_abort_txn_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_abort_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31639,10 +31740,10 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::
     }
     switch (fid)
     {
-      case 0:
+      case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->success.read(iprot);
-          this->__isset.success = true;
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31659,15 +31760,15 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::read(::apache::thrift::
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_abort_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_txn_range_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_abort_txn_result");
 
-  if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
-    xfer += this->success.write(oprot);
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
     xfer += oprot->writeFieldEnd();
   }
   xfer += oprot->writeFieldStop();
@@ -31676,11 +31777,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_result::write(::apache::thrift:
 }
 
 
-ThriftHiveMetastore_heartbeat_txn_range_presult::~ThriftHiveMetastore_heartbeat_txn_range_presult() throw() {
+ThriftHiveMetastore_abort_txn_presult::~ThriftHiveMetastore_abort_txn_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_abort_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31701,10 +31802,10 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift:
     }
     switch (fid)
     {
-      case 0:
+      case 1:
         if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += (*(this->success)).read(iprot);
-          this->__isset.success = true;
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -31722,11 +31823,11 @@ uint32_t ThriftHiveMetastore_heartbeat_txn_range_presult::read(::apache::thrift:
 }
 
 
-ThriftHiveMetastore_compact_args::~ThriftHiveMetastore_compact_args() throw() {
+ThriftHiveMetastore_commit_txn_args::~ThriftHiveMetastore_commit_txn_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_commit_txn_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31767,10 +31868,10 @@ uint32_t ThriftHiveMetastore_compact_args::read(::apache::thrift::protocol::TPro
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_commit_txn_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_args");
 
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
@@ -31782,14 +31883,14 @@ uint32_t ThriftHiveMetastore_compact_args::write(::apache::thrift::protocol::TPr
 }
 
 
-ThriftHiveMetastore_compact_pargs::~ThriftHiveMetastore_compact_pargs() throw() {
+ThriftHiveMetastore_commit_txn_pargs::~ThriftHiveMetastore_commit_txn_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_commit_txn_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_pargs");
 
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
@@ -31801,11 +31902,11 @@ uint32_t ThriftHiveMetastore_compact_pargs::write(::apache::thrift::protocol::TP
 }
 
 
-ThriftHiveMetastore_compact_result::~ThriftHiveMetastore_compact_result() throw() {
+ThriftHiveMetastore_commit_txn_result::~ThriftHiveMetastore_commit_txn_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_commit_txn_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31824,7 +31925,28 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    xfer += iprot->skip(ftype);
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
     xfer += iprot->readFieldEnd();
   }
 
@@ -31833,23 +31955,32 @@ uint32_t ThriftHiveMetastore_compact_result::read(::apache::thrift::protocol::TP
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_compact_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_commit_txn_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_compact_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_commit_txn_result");
 
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
 
-ThriftHiveMetastore_compact_presult::~ThriftHiveMetastore_compact_presult() throw() {
+ThriftHiveMetastore_commit_txn_presult::~ThriftHiveMetastore_commit_txn_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_commit_txn_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31868,7 +31999,28 @@ uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T
     if (ftype == ::apache::thrift::protocol::T_STOP) {
       break;
     }
-    xfer += iprot->skip(ftype);
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
     xfer += iprot->readFieldEnd();
   }
 
@@ -31878,11 +32030,11 @@ uint32_t ThriftHiveMetastore_compact_presult::read(::apache::thrift::protocol::T
 }
 
 
-ThriftHiveMetastore_show_compact_args::~ThriftHiveMetastore_show_compact_args() throw() {
+ThriftHiveMetastore_lock_args::~ThriftHiveMetastore_lock_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_lock_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31923,10 +32075,10 @@ uint32_t ThriftHiveMetastore_show_compact_args::read(::apache::thrift::protocol:
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_lock_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_args");
 
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += this->rqst.write(oprot);
@@ -31938,14 +32090,14 @@ uint32_t ThriftHiveMetastore_show_compact_args::write(::apache::thrift::protocol
 }
 
 
-ThriftHiveMetastore_show_compact_pargs::~ThriftHiveMetastore_show_compact_pargs() throw() {
+ThriftHiveMetastore_lock_pargs::~ThriftHiveMetastore_lock_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_lock_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_show_compact_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_lock_pargs");
 
   xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
   xfer += (*(this->rqst)).write(oprot);
@@ -31957,11 +32109,11 @@ uint32_t ThriftHiveMetastore_show_compact_pargs::write(::apache::thrift::protoco
 }
 
 
-ThriftHiveMetastore_show_compact_result::~ThriftHiveMetastore_show_compact_result() throw() {
+ThriftHiveMetastore_lock_result::~ThriftHiveMetastore_lock_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_lock_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -31990,6 +32142,22 @@ uint32_t ThriftHiveMetastore_show_compact_result::read(::apache::thrift::protoco
           xfer += iprot->skip(ftype);
         }
         break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        b

<TRUNCATED>
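
Every *_args/*_result struct in the generated hunks above follows the same Thrift serialization skeleton: readStructBegin, a loop that reads field headers, dispatches on the field id, skips unknown fields for wire compatibility, and stops at T_STOP; the write side emits the known fields and a writeFieldStop terminator. Here is a minimal, self-contained sketch of that skeleton for a hypothetical one-field struct; the TProtocol calls mirror the generated code above, but ExampleArgs itself is illustrative and not Hive code:

#include <cstdint>
#include <string>
#include <thrift/protocol/TProtocol.h>

using namespace ::apache::thrift::protocol;

struct ExampleArgs {
  int64_t txnid;
  bool txnid_isset;

  ExampleArgs() : txnid(0), txnid_isset(false) {}

  uint32_t read(TProtocol* iprot) {
    uint32_t xfer = 0;
    std::string fname;
    TType ftype;
    int16_t fid;
    xfer += iprot->readStructBegin(fname);
    while (true) {
      xfer += iprot->readFieldBegin(fname, ftype, fid);
      if (ftype == T_STOP) {
        break;  // T_STOP header marks the end of the struct on the wire
      }
      if (fid == 1 && ftype == T_I64) {
        xfer += iprot->readI64(txnid);
        txnid_isset = true;  // mirrors the generated __isset flags
      } else {
        xfer += iprot->skip(ftype);  // unknown field: skip it, stay compatible
      }
      xfer += iprot->readFieldEnd();
    }
    xfer += iprot->readStructEnd();
    return xfer;
  }

  uint32_t write(TProtocol* oprot) const {
    uint32_t xfer = 0;
    xfer += oprot->writeStructBegin("ExampleArgs");
    xfer += oprot->writeFieldBegin("txnid", T_I64, 1);
    xfer += oprot->writeI64(txnid);
    xfer += oprot->writeFieldEnd();
    xfer += oprot->writeFieldStop();  // emits the T_STOP terminator
    xfer += oprot->writeStructEnd();
    return xfer;
  }
};

The pargs/presult variants above apply the same skeleton to pointer-held fields, which is why they dereference, as in (*(this->rqst)).write(oprot), instead of calling the member directly.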

[18/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
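
Each POSTHOOK: Lineage line added in the q.out diffs below records how one column of a CTAS target was derived: the target column, a dependency mode (SIMPLE for a direct column copy, EXPRESSION for a computed or multi-source value, as the examples suggest), and a bracketed list of (sourceTable)alias.FieldSchema(name:..., type:..., comment:...) entries. A small sketch that splits one such line apart, assuming exactly the layout shown in this output; LineageEntry and parseLineage are illustrative helpers, not Hive code, and the rare "(src)src.null" source form is left unhandled:

#include <iostream>
#include <string>
#include <vector>

struct LineageEntry {
  std::string targetColumn;          // e.g. "total_ordered.key"
  std::string mode;                  // "SIMPLE" or "EXPRESSION"
  std::vector<std::string> sources;  // "(src5)src5.FieldSchema(...)" entries
};

LineageEntry parseLineage(const std::string& line) {
  LineageEntry e;
  const std::string prefix = "POSTHOOK: Lineage: ";
  std::string rest = line.substr(prefix.size());

  std::size_t sp = rest.find(' ');
  e.targetColumn = rest.substr(0, sp);        // "<table>.<column>"

  std::size_t br = rest.find('[', sp);
  e.mode = rest.substr(sp + 1, br - sp - 2);  // drop the space before '['

  // Sources sit inside [...], each ending with "), " as printed in the output.
  std::string body = rest.substr(br + 1, rest.rfind(']') - br - 1);
  std::size_t pos = 0;
  std::size_t end;
  while ((end = body.find("), ", pos)) != std::string::npos) {
    e.sources.push_back(body.substr(pos, end - pos + 1));
    pos = end + 3;
  }
  return e;
}

int main() {
  LineageEntry e = parseLineage(
      "POSTHOOK: Lineage: total_ordered.key SIMPLE "
      "[(src5)src5.FieldSchema(name:key, type:string, comment:null), ]");
  std::cout << e.targetColumn << " <- " << e.mode
            << " from " << e.sources.size() << " source(s)\n";
}
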
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
index 0d50ca9..53f3164 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out
@@ -97,6 +97,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@total_ordered
+POSTHOOK: Lineage: total_ordered.key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: total_ordered.value SIMPLE [(src5)src5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: desc formatted total_ordered
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@total_ordered
@@ -210,6 +212,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src5
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@total_ordered
+POSTHOOK: Lineage: total_ordered.key SIMPLE [(src5)src5.FieldSchema(name:key, type:string, comment:null), ]
+POSTHOOK: Lineage: total_ordered.value SIMPLE [(src5)src5.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: desc formatted total_ordered
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@total_ordered

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/parquet_join.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/parquet_join.q.out b/ql/src/test/results/clientpositive/spark/parquet_join.q.out
index f8b19a8..a8d98e8 100644
--- a/ql/src/test/results/clientpositive/spark/parquet_join.q.out
+++ b/ql/src/test/results/clientpositive/spark/parquet_join.q.out
@@ -46,6 +46,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable1
+POSTHOOK: Lineage: parquet_jointable1.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable1.value SIMPLE [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@staging
@@ -56,6 +58,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@staging
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@parquet_jointable2
+POSTHOOK: Lineage: parquet_jointable2.c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- MR join

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/semijoin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/semijoin.q.out b/ql/src/test/results/clientpositive/spark/semijoin.q.out
index 3ed8c02..b1dd351 100644
--- a/ql/src/test/results/clientpositive/spark/semijoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/semijoin.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from t1 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
@@ -41,6 +43,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@t1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(t1)t1.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t2 sort by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t2
@@ -72,6 +76,8 @@ POSTHOOK: Input: default@t1
 POSTHOOK: Input: default@t2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t3
+POSTHOOK: Lineage: t3.key EXPRESSION [(t1)t1.FieldSchema(name:key, type:int, comment:null), (t2)t2.FieldSchema(name:key, type:int, comment:null), ]
+POSTHOOK: Lineage: t3.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), (t2)t2.FieldSchema(name:value, type:string, comment:null), ]
 PREHOOK: query: select * from t3 sort by key, value
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t3
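
The t3 entries just above show the multi-source case: a column filled by a union of t1 and t2 carries an EXPRESSION dependency listing one FieldSchema per contributing table, which is the comma-separated source list the parsing sketch near the top of this commit splits apart.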

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
index 68ce9e4..51c1953 100644
--- a/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
+++ b/ql/src/test/results/clientpositive/spark/skewjoin_noskew.q.out
@@ -175,6 +175,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@noskew
+POSTHOOK: Lineage: noskew.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: noskew.value SIMPLE [(src)a.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select * from noskew
 PREHOOK: type: QUERY
 PREHOOK: Input: default@noskew

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/stats5.q.out b/ql/src/test/results/clientpositive/spark/stats5.q.out
index 51bf6e4..95293c0 100644
--- a/ql/src/test/results/clientpositive/spark/stats5.q.out
+++ b/ql/src/test/results/clientpositive/spark/stats5.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@analyze_src
+POSTHOOK: Lineage: analyze_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain analyze table analyze_src compute statistics
 PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_src compute statistics

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out b/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out
index 6ab9181..b5742ad 100644
--- a/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_nontemp
+POSTHOOK: Lineage: src_nontemp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_nontemp.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TEMPORARY TABLE src_temp AS SELECT * FROM src limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/union24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union24.q.out b/ql/src/test/results/clientpositive/spark/union24.q.out
index 36ec00f..3bdc503 100644
--- a/ql/src/test/results/clientpositive/spark/union24.q.out
+++ b/ql/src/test/results/clientpositive/spark/union24.q.out
@@ -12,6 +12,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src2
+POSTHOOK: Lineage: src2.count EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: src2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table src3 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -22,6 +24,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src3
+POSTHOOK: Lineage: src3.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src3.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src4 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -32,6 +36,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src4
+POSTHOOK: Lineage: src4.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src4.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: create table src5 as select * from src2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src2
@@ -42,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src5
+POSTHOOK: Lineage: src5.count SIMPLE [(src2)src2.FieldSchema(name:count, type:bigint, comment:null), ]
+POSTHOOK: Lineage: src5.key SIMPLE [(src2)src2.FieldSchema(name:key, type:string, comment:null), ]
 PREHOOK: query: explain extended
 select s.key, s.count from (
   select key, count from src2  where key < 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/union27.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union27.q.out b/ql/src/test/results/clientpositive/spark/union27.q.out
index 113a3da..bccbbb1 100644
--- a/ql/src/test/results/clientpositive/spark/union27.q.out
+++ b/ql/src/test/results/clientpositive/spark/union27.q.out
@@ -10,6 +10,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_same
+POSTHOOK: Lineage: jackson_sev_same.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_same.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table dim_pho as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -20,6 +22,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@dim_pho
+POSTHOOK: Lineage: dim_pho.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: dim_pho.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table jackson_sev_add as select * from src
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -30,6 +34,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@jackson_sev_add
+POSTHOOK: Lineage: jackson_sev_add.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: jackson_sev_add.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select b.* from jackson_sev_same a join (select * from dim_pho union all select * from jackson_sev_add)b on a.key=b.key and b.key=97

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/union31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union31.q.out b/ql/src/test/results/clientpositive/spark/union31.q.out
index 4482b95..bbe49aa 100644
--- a/ql/src/test/results/clientpositive/spark/union31.q.out
+++ b/ql/src/test/results/clientpositive/spark/union31.q.out
@@ -20,6 +20,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select * from src where key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -30,6 +32,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t3(key string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
@@ -591,6 +595,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: create table t2 as select key, count(1) as cnt from src where key < 10 group by key
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -601,6 +607,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.cnt EXPRESSION [(src)src.null, ]
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 PREHOOK: query: create table t7(c1 string, cnt int)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/union32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union32.q.out b/ql/src/test/results/clientpositive/spark/union32.q.out
index ec7447e..1ec7e64 100644
--- a/ql/src/test/results/clientpositive/spark/union32.q.out
+++ b/ql/src/test/results/clientpositive/spark/union32.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t1
+POSTHOOK: Lineage: t1.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: CREATE TABLE t2 AS SELECT * FROM src WHERE key < 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@src
@@ -28,6 +30,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t2
+POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: -- Test simple union with double
 EXPLAIN
 SELECT * FROM 

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/union_top_level.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
index 6560db1..385a99d 100644
--- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out
+++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out
@@ -529,6 +529,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@union_top
+POSTHOOK: Lineage: union_top.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: union_top.value EXPRESSION []
 PREHOOK: query: select * from union_top
 PREHOOK: type: QUERY
 PREHOOK: Input: default@union_top

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
index 7fd7833..06490a8 100644
--- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_date_test
+POSTHOOK: Lineage: decimal_date_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_date_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
index 6646e1c..cfdfce1 100644
--- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out
@@ -16,6 +16,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_vgby
+POSTHOOK: Lineage: decimal_vgby.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: -- SORT_QUERY_RESULTS
 
 -- First only do simple aggregations that output primitives only

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
index 66cc11b..50134d9 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -117,6 +165,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
index 4a1780f..dba7cbd 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
index 0673538..1c3b7a6 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
index e64ea65..cc1db38 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1b
+POSTHOOK: Lineage: small_alltypesorc1b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2b
+POSTHOOK: Lineage: small_alltypesorc2b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3b
+POSTHOOK: Lineage: small_alltypesorc3b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4b
+POSTHOOK: Lineage: small_alltypesorc4b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b
@@ -132,6 +180,18 @@ POSTHOOK: Input: default@small_alltypesorc3b
 POSTHOOK: Input: default@small_alltypesorc4b
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_b
+POSTHOOK: Lineage: small_alltypesorc_b.cbigint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cdouble EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cfloat EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.csmallint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctinyint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_b

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
index f73f25a..8c065f2 100644
--- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sorted_mod_4
+POSTHOOK: Lineage: sorted_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: sorted_mod_4.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sorted_mod_4
@@ -46,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table
+POSTHOOK: Lineage: small_table.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table
@@ -718,6 +722,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mod_8_mod_4
+POSTHOOK: Lineage: mod_8_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: mod_8_mod_4.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mod_8_mod_4
@@ -746,6 +752,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table2
+POSTHOOK: Lineage: small_table2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table2.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
index c20033c..9a6cb52 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_decimal_date.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@date_decimal_test
+POSTHOOK: Lineage: date_decimal_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdecimal EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
index 4cb21a1..e9fb1e8 100644
--- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
@@ -3026,6 +3026,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesnull
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesnullorc
+POSTHOOK: Lineage: alltypesnullorc.cbigint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cdouble SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cfloat SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.csmallint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctinyint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: explain
 select count(*) from alltypesnullorc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
index cb949e4..7374714 100644
--- a/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
+++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_1.q.out
@@ -148,6 +148,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src/_/cbo
+POSTHOOK: Lineage: src/_/cbo.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src/_/cbo.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: analyze table `c/b/o_t1` partition (dt) compute statistics
 PREHOOK: type: QUERY
 PREHOOK: Input: default@c/b/o_t1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats5.q.out b/ql/src/test/results/clientpositive/stats5.q.out
index 93cff91..74ddadb 100644
--- a/ql/src/test/results/clientpositive/stats5.q.out
+++ b/ql/src/test/results/clientpositive/stats5.q.out
@@ -8,6 +8,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@analyze_src
+POSTHOOK: Lineage: analyze_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: analyze_src.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: explain analyze table analyze_src compute statistics
 PREHOOK: type: QUERY
 POSTHOOK: query: explain analyze table analyze_src compute statistics

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out b/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
index 7c20c46..652acbb 100644
--- a/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
+++ b/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
@@ -198,6 +198,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@tbl_s2m
+POSTHOOK: Lineage: tbl_s2m.t SIMPLE []
 PREHOOK: query: select str_to_map(t,'_','=')['333'] from tbl_s2m
 PREHOOK: type: QUERY
 PREHOOK: Input: default@tbl_s2m

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/temp_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/temp_table.q.out b/ql/src/test/results/clientpositive/temp_table.q.out
index d180d03..8aedfa1 100644
--- a/ql/src/test/results/clientpositive/temp_table.q.out
+++ b/ql/src/test/results/clientpositive/temp_table.q.out
@@ -533,6 +533,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@s
+POSTHOOK: Lineage: s.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: s.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 PREHOOK: query: select count(*) from s
 PREHOOK: type: QUERY
 PREHOOK: Input: default@s


[49/51] [abbrv] hive git commit: HIVE-4662: first_value can't have more than one order by column (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-4662: first_value can't have more than one order by column (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b574501
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b574501
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b574501

Branch: refs/heads/llap
Commit: 0b574501c538d8011898fbe23a712feffc2925b6
Parents: 06b604a
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Mar 11 08:14:28 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Wed Mar 16 20:02:55 2016 +0100

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/PTFTranslator.java     |  25 +-
 .../hadoop/hive/ql/parse/WindowingSpec.java     |  56 +-
 .../hadoop/hive/ql/plan/PTFDeserializer.java    |   5 +-
 .../hive/ql/plan/ptf/PTFExpressionDef.java      |   3 +-
 .../hive/ql/plan/ptf/ValueBoundaryDef.java      |  16 +-
 .../hive/ql/udf/ptf/WindowingTableFunction.java | 303 ++++--
 .../clientpositive/windowing_range_multiorder.q |  34 +
 .../windowing_range_multiorder.q.out            | 910 +++++++++++++++++++
 8 files changed, 1232 insertions(+), 120 deletions(-)
----------------------------------------------------------------------
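
For illustration, a sketch of the kind of query this change enables (the table and
column names are borrowed from the alltypesorc test table seen in the q.out diffs
above; the query itself is not taken from the patch):

  SELECT cboolean1, ctinyint, cint,
         first_value(csmallint) OVER (PARTITION BY cboolean1
                                      ORDER BY ctinyint, cint
                                      RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
  FROM alltypesorc;

Before this commit, a value-based (RANGE) window frame with more than one ORDER BY
column appears to have been rejected during windowing-spec validation (see the check
being reworked in WindowingSpec.java below); the new windowing_range_multiorder.q
test exercises queries of this shape.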


http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
index 9921b21..018d8d0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/PTFTranslator.java
@@ -31,8 +31,6 @@ import java.util.Stack;
 import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -99,6 +97,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class PTFTranslator {
 
@@ -547,15 +547,20 @@ public class PTFTranslator {
     if (bndSpec instanceof ValueBoundarySpec) {
       ValueBoundarySpec vBndSpec = (ValueBoundarySpec) bndSpec;
       ValueBoundaryDef vbDef = new ValueBoundaryDef(vBndSpec.getDirection(), vBndSpec.getAmt());
-      PTFTranslator.validateNoLeadLagInValueBoundarySpec(vBndSpec.getExpression());
-      PTFExpressionDef exprDef = null;
-      try {
-        exprDef = buildExpressionDef(inpShape, vBndSpec.getExpression());
-      } catch (HiveException he) {
-        throw new SemanticException(he);
+      for (OrderExpression oe : vBndSpec.getOrderExpressions()) {
+        PTFTranslator.validateNoLeadLagInValueBoundarySpec(oe.getExpression());
+        PTFExpressionDef exprDef = null;
+        try {
+          exprDef = buildExpressionDef(inpShape, oe.getExpression());
+        } catch (HiveException he) {
+          throw new SemanticException(he);
+        }
+        PTFTranslator.validateValueBoundaryExprType(exprDef.getOI());
+        OrderExpressionDef orderExprDef = new OrderExpressionDef(exprDef);
+        orderExprDef.setOrder(oe.getOrder());
+        orderExprDef.setNullOrder(oe.getNullOrder());
+        vbDef.addOrderExpressionDef(orderExprDef);
       }
-      PTFTranslator.validateValueBoundaryExprType(exprDef.getOI());
-      vbDef.setExpressionDef(exprDef);
       return vbDef;
     }
     else if (bndSpec instanceof RangeBoundarySpec) {
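
For each ORDER BY expression of a value boundary, translation now validates that the expression uses no lead/lag and has a primitive type, wraps it in an OrderExpressionDef carrying its order and null-order, and adds it to the ValueBoundaryDef; before this change only a single expression was built and attached.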

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
index 1bfe8d9..5ce7200 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/WindowingSpec.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.List;
 
 import org.antlr.runtime.CommonToken;
 import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
 import org.apache.hadoop.hive.ql.exec.WindowFunctionInfo;
+import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec;
@@ -295,17 +297,30 @@ public class WindowingSpec {
     BoundarySpec end = wFrame.getEnd();
 
     if (start instanceof ValueBoundarySpec || end instanceof ValueBoundarySpec) {
-      if ( order != null ) {
-        if ( order.getExpressions().size() > 1 ) {
-          throw new SemanticException("Range based Window Frame can have only 1 Sort Key");
-        }
+      if (order == null || order.getExpressions().size() == 0) {
+        throw new SemanticException("Range based Window Frame needs to specify ORDER BY clause");
+      }
 
-        if (start instanceof ValueBoundarySpec) {
-          ((ValueBoundarySpec)start).setExpression(order.getExpressions().get(0).getExpression());
-        }
-        if (end instanceof ValueBoundarySpec) {
-          ((ValueBoundarySpec)end).setExpression(order.getExpressions().get(0).getExpression());
-        }
+      boolean defaultPreceding = start.getDirection() == Direction.PRECEDING &&
+              start.getAmt() == BoundarySpec.UNBOUNDED_AMOUNT &&
+              end.getDirection() == Direction.CURRENT;
+      boolean defaultFollowing = start.getDirection() == Direction.CURRENT &&
+              end.getDirection() == Direction.FOLLOWING &&
+              end.getAmt() == BoundarySpec.UNBOUNDED_AMOUNT;
+      boolean defaultPrecedingFollowing = start.getDirection() == Direction.PRECEDING &&
+              start.getAmt() == BoundarySpec.UNBOUNDED_AMOUNT &&
+              end.getDirection() == Direction.FOLLOWING &&
+              end.getAmt() == BoundarySpec.UNBOUNDED_AMOUNT;
+      boolean multiOrderAllowed = defaultPreceding || defaultFollowing || defaultPrecedingFollowing;
+      if ( order.getExpressions().size() != 1 && !multiOrderAllowed) {
+        throw new SemanticException("Range value based Window Frame can have only 1 Sort Key");
+      }
+
+      if (start instanceof ValueBoundarySpec) {
+        ((ValueBoundarySpec)start).setOrderExpressions(order.getExpressions());
+      }
+      if (end instanceof ValueBoundarySpec) {
+        ((ValueBoundarySpec)end).setOrderExpressions(order.getExpressions());
       }
     }
   }
@@ -683,8 +698,8 @@ public class WindowingSpec {
   public static class ValueBoundarySpec extends BoundarySpec
   {
     Direction direction;
-    ASTNode expression;
     int amt;
+    List<OrderExpression> orderExpressions;
 
     public ValueBoundarySpec() {
     }
@@ -708,14 +723,14 @@ public class WindowingSpec {
       this.direction = direction;
     }
 
-    public ASTNode getExpression()
+    public List<OrderExpression> getOrderExpressions()
     {
-      return expression;
+      return orderExpressions;
     }
 
-    public void setExpression(ASTNode expression)
+    public void setOrderExpressions(List<OrderExpression> orderExpressions)
     {
-      this.expression = expression;
+      this.orderExpressions = orderExpressions;
     }
 
     @Override
@@ -733,7 +748,16 @@ public class WindowingSpec {
     @Override
     public String toString()
     {
-      return String.format("value(%s %s %s)", expression.toStringTree(), amt, direction);
+      StringBuilder exprs = new StringBuilder();
+      if (orderExpressions != null) {
+        for (int i=0; i<orderExpressions.size(); i++) {
+          exprs.append(i == 0 ? orderExpressions.get(i).getExpression().toStringTree()
+                  : ", " + orderExpressions.get(i).getExpression().toStringTree());
+        }
+      } else {
+        exprs.append("No order expression");
+      }
+      return String.format("value(%s %s %s)", exprs.toString(), amt, direction);
     }
 
     public int compareTo(BoundarySpec other)
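
The three booleans above enumerate the default frame shapes; only for those are multiple sort keys accepted, since their boundaries never need a numeric distance over a single key. For illustration, the first query below is from the new test, while the second is a hypothetical one that the analyzer still rejects:

  -- accepted: default frame, two sort keys
  select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100;

  -- rejected: Range value based Window Frame can have only 1 Sort Key
  select s, si, i, min(i) over (partition by s order by si, i range between 10 preceding and current row) from over10k;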

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
index 830a8eb..cfddb22 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDeserializer.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.LeadLagInfo;
 import org.apache.hadoop.hive.ql.parse.WindowingExprNodeEvaluatorFactory;
 import org.apache.hadoop.hive.ql.plan.ptf.BoundaryDef;
+import org.apache.hadoop.hive.ql.plan.ptf.OrderExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFInputDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFQueryInputDef;
@@ -214,7 +215,9 @@ public class PTFDeserializer {
   protected void initialize(BoundaryDef def, ShapeDetails inpShape) throws HiveException {
     if (def instanceof ValueBoundaryDef) {
       ValueBoundaryDef vDef = (ValueBoundaryDef) def;
-      initialize(vDef.getExpressionDef(), inpShape);
+      for (OrderExpressionDef exprDef : vDef.getOrderDef().getExpressions()) {
+        initialize(exprDef, inpShape);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java
index fa7fc76..aa82fbf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java
@@ -19,10 +19,9 @@
 package org.apache.hadoop.hive.ql.plan.ptf;
 
 import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
-import org.apache.hadoop.hive.ql.exec.PTFUtils;
 import org.apache.hadoop.hive.ql.plan.Explain;
-import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.Explain.Level;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
 public class PTFExpressionDef {

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ValueBoundaryDef.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ValueBoundaryDef.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ValueBoundaryDef.java
index 3725ac8..e1a8a90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ValueBoundaryDef.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ValueBoundaryDef.java
@@ -19,16 +19,16 @@
 package org.apache.hadoop.hive.ql.plan.ptf;
 
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.Direction;
-import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 
 public class ValueBoundaryDef extends BoundaryDef {
-  private PTFExpressionDef expressionDef;
+  private OrderDef orderDef;
   private final int amt;
   private final int relativeOffset;
 
   public ValueBoundaryDef(Direction direction, int amt) {
     this.direction = direction;
     this.amt = amt;
+    this.orderDef = new OrderDef();
 
     // Calculate relative offset
     switch(this.direction) {
@@ -52,16 +52,12 @@ public class ValueBoundaryDef extends BoundaryDef {
     return this.direction == Direction.PRECEDING ? vb.amt - this.amt : this.amt - vb.amt;
   }
 
-  public PTFExpressionDef getExpressionDef() {
-    return expressionDef;
+  public OrderDef getOrderDef() {
+    return orderDef;
   }
 
-  public void setExpressionDef(PTFExpressionDef expressionDef) {
-    this.expressionDef = expressionDef;
-  }
-
-  public ObjectInspector getOI() {
-    return expressionDef == null ? null : expressionDef.getOI();
+  public void addOrderExpressionDef(OrderExpressionDef expressionDef) {
+    this.orderDef.addExpression(expressionDef);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
index 2ac4039..858b47a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java
@@ -38,13 +38,14 @@ import org.apache.hadoop.hive.ql.exec.PTFPartition.PTFPartitionIterator;
 import org.apache.hadoop.hive.ql.exec.PTFRollingPartition;
 import org.apache.hadoop.hive.ql.exec.WindowFunctionInfo;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.NullOrder;
 import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec;
 import org.apache.hadoop.hive.ql.parse.WindowingSpec.Direction;
 import org.apache.hadoop.hive.ql.plan.PTFDesc;
 import org.apache.hadoop.hive.ql.plan.ptf.BoundaryDef;
+import org.apache.hadoop.hive.ql.plan.ptf.OrderDef;
+import org.apache.hadoop.hive.ql.plan.ptf.OrderExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PTFExpressionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef;
 import org.apache.hadoop.hive.ql.plan.ptf.ValueBoundaryDef;
@@ -110,9 +111,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     StructObjectInspector inputOI = iPart.getOutputOI();
 
     WindowTableFunctionDef wTFnDef = (WindowTableFunctionDef) getTableDef();
-    Order order = wTFnDef.getOrder().getExpressions().get(0).getOrder();
-    NullOrder nullOrder = wTFnDef.getOrder().getExpressions().get(0).getNullOrder();
-
     for(WindowFunctionDef wFn : wTFnDef.getWindowFunctions()) {
       boolean processWindow = processWindow(wFn);
       pItr.reset();
@@ -123,7 +121,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
         }
         oColumns.add((List<?>)out);
       } else {
-        oColumns.add(executeFnwithWindow(getQueryDef(), wFn, iPart, order, nullOrder));
+        oColumns.add(executeFnwithWindow(getQueryDef(), wFn, iPart));
       }
     }
 
@@ -422,8 +420,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       } else {
         int rowToProcess = streamingState.rollingPart.rowToProcess(wFn.getWindowFrame());
         if (rowToProcess >= 0) {
-          Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart,
-              streamingState.order, streamingState.nullOrder);
+          Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart);
           PTFPartitionIterator<Object> rItr = rng.iterator();
           PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
           Object out = evaluateWindowFunction(wFn, rItr);
@@ -500,8 +497,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
         while (numRowsRemaining > 0) {
           int rowToProcess = streamingState.rollingPart.size() - numRowsRemaining;
           if (rowToProcess >= 0) {
-            Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart,
-                streamingState.order, streamingState.nullOrder);
+            Range rng = getRange(wFn, rowToProcess, streamingState.rollingPart);
             PTFPartitionIterator<Object> rItr = rng.iterator();
             PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
             Object out = evaluateWindowFunction(wFn, rItr);
@@ -660,13 +656,11 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
   ArrayList<Object> executeFnwithWindow(PTFDesc ptfDesc,
       WindowFunctionDef wFnDef,
-      PTFPartition iPart,
-      Order order,
-      NullOrder nullOrder)
+      PTFPartition iPart)
     throws HiveException {
     ArrayList<Object> vals = new ArrayList<Object>();
     for(int i=0; i < iPart.size(); i++) {
-      Range rng = getRange(wFnDef, i, iPart, order, nullOrder);
+      Range rng = getRange(wFnDef, i, iPart);
       PTFPartitionIterator<Object> rItr = rng.iterator();
       PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
       Object out = evaluateWindowFunction(wFnDef, rItr);
@@ -675,7 +669,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     return vals;
   }
 
-  private Range getRange(WindowFunctionDef wFnDef, int currRow, PTFPartition p, Order order, NullOrder nullOrder) throws HiveException
+  private Range getRange(WindowFunctionDef wFnDef, int currRow, PTFPartition p) throws HiveException
   {
     BoundaryDef startB = wFnDef.getWindowFrame().getStart();
     BoundaryDef endB = wFnDef.getWindowFrame().getEnd();
@@ -686,18 +680,26 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
 
     int start, end;
-
     if (rowFrame) {
       start = getRowBoundaryStart(startB, currRow);
       end = getRowBoundaryEnd(endB, currRow, p);
-    }
-    else {
+    } else {
       ValueBoundaryScanner vbs;
       if ( startB instanceof ValueBoundaryDef ) {
-        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)startB, order, nullOrder);
+        ValueBoundaryDef startValueBoundaryDef = (ValueBoundaryDef)startB;
+        if (startValueBoundaryDef.getOrderDef().getExpressions().size() != 1) {
+          vbs = MultiValueBoundaryScanner.getScanner(startValueBoundaryDef);
+        } else {
+          vbs = SingleValueBoundaryScanner.getScanner(startValueBoundaryDef);
+        }
       }
       else {
-        vbs = ValueBoundaryScanner.getScanner((ValueBoundaryDef)endB, order, nullOrder);
+        ValueBoundaryDef endValueBoundaryDef = (ValueBoundaryDef)endB;
+        if (endValueBoundaryDef.getOrderDef().getExpressions().size() != 1) {
+          vbs = MultiValueBoundaryScanner.getScanner(endValueBoundaryDef);
+        } else {
+          vbs = SingleValueBoundaryScanner.getScanner(endValueBoundaryDef);
+        }
       }
       vbs.reset(startB);
       start =  vbs.computeStart(currRow, p);
@@ -770,29 +772,35 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  /*
-   * - starting from the given rowIdx scan in the given direction until a row's expr
-   * evaluates to an amt that crosses the 'amt' threshold specified in the ValueBoundaryDef.
-   */
-  static abstract class ValueBoundaryScanner
-  {
+
+  static abstract class ValueBoundaryScanner {
     BoundaryDef bndDef;
-    Order order;
-    NullOrder nullOrder;
-    PTFExpressionDef expressionDef;
 
-    public ValueBoundaryScanner(BoundaryDef bndDef, Order order, NullOrder nullOrder, PTFExpressionDef expressionDef)
-    {
+    public ValueBoundaryScanner(BoundaryDef bndDef) {
       this.bndDef = bndDef;
-      this.order = order;
-      this.nullOrder = nullOrder;
-      this.expressionDef = expressionDef;
     }
 
     public void reset(BoundaryDef bndDef) {
       this.bndDef = bndDef;
     }
 
+    protected abstract int computeStart(int rowIdx, PTFPartition p) throws HiveException;
+
+    protected abstract int computeEnd(int rowIdx, PTFPartition p) throws HiveException;
+  }
+
+  /*
+   * - starting from the given rowIdx scan in the given direction until a row's expr
+   * evaluates to an amt that crosses the 'amt' threshold specified in the ValueBoundaryDef.
+   */
+  static abstract class SingleValueBoundaryScanner extends ValueBoundaryScanner {
+    OrderExpressionDef expressionDef;
+
+    public SingleValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef);
+      this.expressionDef = expressionDef;
+    }
+
     /*
 |  Use | Boundary1.type | Boundary1. amt | Sort Key | Order | Behavior                          |
 | Case |                |                |          |       |                                   |
@@ -826,6 +834,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 |      |                |                |          |       | such that R2.sk - R.sk > amt      |
 |------+----------------+----------------+----------+-------+-----------------------------------|
      */
+    @Override
     protected int computeStart(int rowIdx, PTFPartition p) throws HiveException {
       switch(bndDef.getDirection()) {
       case PRECEDING:
@@ -838,9 +847,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       }
     }
 
-    /*
-     *
-     */
     protected int computeStartPreceding(int rowIdx, PTFPartition p) throws HiveException {
       int amt = bndDef.getAmt();
       // Use Case 1.
@@ -851,7 +857,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
       if ( sortKey == null ) {
         // Use Case 2.
-        if ( order == Order.ASC ) {
+        if ( expressionDef.getOrder() == Order.ASC ) {
           return 0;
         }
         else { // Use Case 3.
@@ -869,7 +875,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       int r = rowIdx;
 
       // Use Case 4.
-      if ( order == Order.DESC ) {
+      if ( expressionDef.getOrder() == Order.DESC ) {
         while (r >= 0 && !isDistanceGreater(rowVal, sortKey, amt) ) {
           r--;
           if ( r >= 0 ) {
@@ -925,7 +931,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
       if ( sortKey == null ) {
         // Use Case 9.
-        if ( order == Order.DESC) {
+        if ( expressionDef.getOrder() == Order.DESC) {
           return p.size();
         }
         else { // Use Case 10.
@@ -940,7 +946,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       }
 
       // Use Case 11.
-      if ( order == Order.DESC) {
+      if ( expressionDef.getOrder() == Order.DESC) {
         while (r < p.size() && !isDistanceGreater(sortKey, rowVal, amt) ) {
           r++;
           if ( r < p.size() ) {
@@ -992,6 +998,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 |      |                |               |          |       | end = R2.idx                      |
 |------+----------------+---------------+----------+-------+-----------------------------------|
      */
+    @Override
     protected int computeEnd(int rowIdx, PTFPartition p) throws HiveException {
       switch(bndDef.getDirection()) {
       case PRECEDING:
@@ -1013,7 +1020,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
       if ( sortKey == null ) {
         // Use Case 2.
-        if ( order == Order.DESC ) {
+        if ( expressionDef.getOrder() == Order.DESC ) {
           return p.size();
         }
         else { // Use Case 3.
@@ -1025,7 +1032,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       int r = rowIdx;
 
       // Use Case 4.
-      if ( order == Order.DESC ) {
+      if ( expressionDef.getOrder() == Order.DESC ) {
         while (r >= 0 && !isDistanceGreater(rowVal, sortKey, amt) ) {
           r--;
           if ( r >= 0 ) {
@@ -1086,7 +1093,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
       if ( sortKey == null ) {
         // Use Case 9.
-        if ( order == Order.DESC) {
+        if ( expressionDef.getOrder() == Order.DESC) {
           return p.size();
         }
         else { // Use Case 10.
@@ -1101,7 +1108,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       }
 
       // Use Case 11.
-      if ( order == Order.DESC) {
+      if ( expressionDef.getOrder() == Order.DESC) {
         while (r < p.size() && !isDistanceGreater(sortKey, rowVal, amt) ) {
           r++;
           if ( r < p.size() ) {
@@ -1140,25 +1147,30 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
 
 
     @SuppressWarnings("incomplete-switch")
-    public static ValueBoundaryScanner getScanner(ValueBoundaryDef vbDef, Order order, NullOrder nullOrder)
+    public static SingleValueBoundaryScanner getScanner(ValueBoundaryDef vbDef)
         throws HiveException {
-      PrimitiveObjectInspector pOI = (PrimitiveObjectInspector) vbDef.getOI();
+      if (vbDef.getOrderDef().getExpressions().size() != 1) {
+        throw new HiveException("Internal error: initializing SingleValueBoundaryScanner with"
+                + " multiple expression for sorting");
+      }
+      OrderExpressionDef exprDef = vbDef.getOrderDef().getExpressions().get(0);
+      PrimitiveObjectInspector pOI = (PrimitiveObjectInspector) exprDef.getOI();
       switch(pOI.getPrimitiveCategory()) {
       case BYTE:
       case INT:
       case LONG:
       case SHORT:
       case TIMESTAMP:
-        return new LongValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
+        return new LongValueBoundaryScanner(vbDef, exprDef);
       case DOUBLE:
       case FLOAT:
-        return new DoubleValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
+        return new DoubleValueBoundaryScanner(vbDef, exprDef);
       case DECIMAL:
-        return new HiveDecimalValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
+        return new HiveDecimalValueBoundaryScanner(vbDef, exprDef);
       case DATE:
-        return new DateValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
+        return new DateValueBoundaryScanner(vbDef, exprDef);
       case STRING:
-        return new StringValueBoundaryScanner(vbDef, order, nullOrder, vbDef.getExpressionDef());
+        return new StringValueBoundaryScanner(vbDef, exprDef);
       }
       throw new HiveException(
           String.format("Internal Error: attempt to setup a Window for datatype %s",
@@ -1166,10 +1178,9 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  public static class LongValueBoundaryScanner extends ValueBoundaryScanner {
-    public LongValueBoundaryScanner(BoundaryDef bndDef, Order order, NullOrder nullOrder,
-        PTFExpressionDef expressionDef) {
-      super(bndDef,order,nullOrder,expressionDef);
+  public static class LongValueBoundaryScanner extends SingleValueBoundaryScanner {
+    public LongValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef,expressionDef);
     }
 
     @Override
@@ -1199,10 +1210,9 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  public static class DoubleValueBoundaryScanner extends ValueBoundaryScanner {
-    public DoubleValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        NullOrder nullOrder, PTFExpressionDef expressionDef) {
-      super(bndDef,order,nullOrder,expressionDef);
+  public static class DoubleValueBoundaryScanner extends SingleValueBoundaryScanner {
+    public DoubleValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef,expressionDef);
     }
 
     @Override
@@ -1232,10 +1242,9 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  public static class HiveDecimalValueBoundaryScanner extends ValueBoundaryScanner {
-    public HiveDecimalValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        NullOrder nullOrder, PTFExpressionDef expressionDef) {
-      super(bndDef,order,nullOrder,expressionDef);
+  public static class HiveDecimalValueBoundaryScanner extends SingleValueBoundaryScanner {
+    public HiveDecimalValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef,expressionDef);
     }
 
     @Override
@@ -1265,10 +1274,9 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  public static class DateValueBoundaryScanner extends ValueBoundaryScanner {
-    public DateValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        NullOrder nullOrder, PTFExpressionDef expressionDef) {
-      super(bndDef,order,nullOrder,expressionDef);
+  public static class DateValueBoundaryScanner extends SingleValueBoundaryScanner {
+    public DateValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef,expressionDef);
     }
 
     @Override
@@ -1293,10 +1301,9 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
-  public static class StringValueBoundaryScanner extends ValueBoundaryScanner {
-    public StringValueBoundaryScanner(BoundaryDef bndDef, Order order,
-        NullOrder nullOrder, PTFExpressionDef expressionDef) {
-      super(bndDef,order,nullOrder,expressionDef);
+  public static class StringValueBoundaryScanner extends SingleValueBoundaryScanner {
+    public StringValueBoundaryScanner(BoundaryDef bndDef, OrderExpressionDef expressionDef) {
+      super(bndDef,expressionDef);
     }
 
     @Override
@@ -1318,6 +1325,149 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     }
   }
 
+  /* Boundary scanner for RANGE frames whose ORDER BY has multiple sort keys;
+   * start/end boundaries reduce to the current row's peer group. */
+  static class MultiValueBoundaryScanner extends ValueBoundaryScanner {
+    OrderDef orderDef;
+
+    public MultiValueBoundaryScanner(BoundaryDef bndDef, OrderDef orderDef) {
+      super(bndDef);
+      this.orderDef = orderDef;
+    }
+
+    /*
+|------+----------------+----------------+----------+-------+-----------------------------------|
+| Use  | Boundary1.type | Boundary1. amt | Sort Key | Order | Behavior                          |
+| Case |                |                |          |       |                                   |
+|------+----------------+----------------+----------+-------+-----------------------------------|
+|   1. | PRECEDING      | UNB            | ANY      | ANY   | start = 0                         |
+|   2. | CURRENT ROW    |                | ANY      | ANY   | scan backwards until row R2       |
+|      |                |                |          |       | such that R2.sk != R.sk           |
+|      |                |                |          |       | start = R2.idx + 1                |
+|------+----------------+----------------+----------+-------+-----------------------------------|
+     */
+    @Override
+    protected int computeStart(int rowIdx, PTFPartition p) throws HiveException {
+      switch(bndDef.getDirection()) {
+      case PRECEDING:
+        return computeStartPreceding(rowIdx, p);
+      case CURRENT:
+        return computeStartCurrentRow(rowIdx, p);
+      case FOLLOWING:
+      default:
+        throw new HiveException(
+                "FOLLOWING not allowed for starting RANGE with multiple expressions in ORDER BY");
+      }
+    }
+
+    protected int computeStartPreceding(int rowIdx, PTFPartition p) throws HiveException {
+      int amt = bndDef.getAmt();
+      if ( amt == BoundarySpec.UNBOUNDED_AMOUNT ) {
+        return 0;
+      }
+      throw new HiveException(
+              "PRECEDING needs UNBOUNDED for RANGE with multiple expressions in ORDER BY");
+    }
+
+    protected int computeStartCurrentRow(int rowIdx, PTFPartition p) throws HiveException {
+      Object[] sortKey = computeValues(p.getAt(rowIdx));
+      Object[] rowVal = sortKey;
+      int r = rowIdx;
+
+      while (r >= 0 && isEqual(rowVal, sortKey) ) {
+        r--;
+        if ( r >= 0 ) {
+          rowVal = computeValues(p.getAt(r));
+        }
+      }
+      return r + 1;
+    }
+
+    /*
+|------+----------------+---------------+----------+-------+-----------------------------------|
+| Use  | Boundary2.type | Boundary2.amt | Sort Key | Order | Behavior                          |
+| Case |                |               |          |       |                                   |
+|------+----------------+---------------+----------+-------+-----------------------------------|
+|   1. | CURRENT ROW    |               | ANY      | ANY   | scan forward until row R2         |
+|      |                |               |          |       | such that R2.sk != R.sk           |
+|      |                |               |          |       | end = R2.idx                      |
+|   2. | FOLLOWING      | UNB           | ANY      | ANY   | end = partition.size()            |
+|------+----------------+---------------+----------+-------+-----------------------------------|
+     */
+    @Override
+    protected int computeEnd(int rowIdx, PTFPartition p) throws HiveException {
+      switch(bndDef.getDirection()) {
+      case PRECEDING:
+        throw new HiveException(
+                "PRECEDING not allowed for finishing RANGE with multiple expressions in ORDER BY");
+      case CURRENT:
+        return computeEndCurrentRow(rowIdx, p);
+      case FOLLOWING:
+      default:
+        return computeEndFollowing(rowIdx, p);
+      }
+    }
+
+    protected int computeEndCurrentRow(int rowIdx, PTFPartition p) throws HiveException {
+      Object[] sortKey = computeValues(p.getAt(rowIdx));
+      Object[] rowVal = sortKey;
+      int r = rowIdx;
+
+      while (r < p.size() && isEqual(sortKey, rowVal) ) {
+        r++;
+        if ( r < p.size() ) {
+          rowVal = computeValues(p.getAt(r));
+        }
+      }
+      return r;
+    }
+
+    protected int computeEndFollowing(int rowIdx, PTFPartition p) throws HiveException {
+      int amt = bndDef.getAmt();
+      if ( amt == BoundarySpec.UNBOUNDED_AMOUNT ) {
+        return p.size();
+      }
+      throw new HiveException(
+              "FOLLOWING needs UNBOUNDED for RANGE with multiple expressions in ORDER BY");
+    }
+
+    public Object[] computeValues(Object row) throws HiveException {
+      Object[] objs = new Object[orderDef.getExpressions().size()];
+      for (int i = 0; i < objs.length; i++) {
+        Object o = orderDef.getExpressions().get(i).getExprEvaluator().evaluate(row);
+        objs[i] = ObjectInspectorUtils.copyToStandardObject(o, orderDef.getExpressions().get(i).getOI());
+      }
+      return objs;
+    }
+
+    public boolean isEqual(Object[] v1, Object[] v2) {
+      assert v1.length == v2.length;
+      for (int i = 0; i < v1.length; i++) {
+        if (v1[i] == null && v2[i] == null) {
+          continue;
+        }
+        if (v1[i] == null || v2[i] == null) {
+          return false;
+        }
+        if (ObjectInspectorUtils.compare(
+                v1[i], orderDef.getExpressions().get(i).getOI(),
+                v2[i], orderDef.getExpressions().get(i).getOI()) != 0) {
+          return false;
+        }
+      }
+      return true;
+    }
+
+    public static MultiValueBoundaryScanner getScanner(ValueBoundaryDef vbDef)
+        throws HiveException {
+      if (vbDef.getOrderDef().getExpressions().size() <= 1) {
+        throw new HiveException("Internal error: initializing SingleValueBoundaryScanner with"
+                + " multiple expression for sorting");
+      }
+      return new MultiValueBoundaryScanner(vbDef, vbDef.getOrderDef());
+    }
+  }
+
   public static class SameList<E> extends AbstractList<E> {
     int sz;
     E val;
@@ -1351,8 +1501,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
      */
     int[] wFnsToProcess;
     WindowTableFunctionDef wTFnDef;
-    Order order;
-    NullOrder nullOrder;
     PTFDesc ptfDesc;
     StructObjectInspector inputOI;
     AggregationBuffer[] aggBuffers;
@@ -1367,8 +1515,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       this.wFnsToProcess = wFnsToProcess;
       this.currIdx = 0;
       wTFnDef = (WindowTableFunctionDef) getTableDef();
-      order = wTFnDef.getOrder().getExpressions().get(0).getOrder();
-      nullOrder = wTFnDef.getOrder().getExpressions().get(0).getNullOrder();
       ptfDesc = getQueryDef();
       inputOI = iPart.getOutputOI();
 
@@ -1423,7 +1569,7 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
             out = ObjectInspectorUtils.copyToStandardObject(out, wFn.getOI());
             output.set(j, out);
           } else {
-            Range rng = getRange(wFn, currIdx, iPart, order, nullOrder);
+            Range rng = getRange(wFn, currIdx, iPart);
             PTFPartitionIterator<Object> rItr = rng.iterator();
             PTFOperator.connectLeadLagFunctionsToPartition(ptfDesc, rItr);
             output.set(j, evaluateWindowFunction(wFn, rItr));
@@ -1459,8 +1605,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
     List<Object>[] fnOutputs;
     AggregationBuffer[] aggBuffers;
     Object[][] funcArgs;
-    Order order;
-    NullOrder nullOrder;
     RankLimit rnkLimit;
 
     @SuppressWarnings("unchecked")
@@ -1474,9 +1618,6 @@ public class WindowingTableFunction extends TableFunctionEvaluator {
       rollingPart = PTFPartition.createRolling(cfg, serde, inputOI, outputOI,
           precedingSpan, followingSpan);
 
-      order = tabDef.getOrder().getExpressions().get(0).getOrder();
-      nullOrder = tabDef.getOrder().getExpressions().get(0).getNullOrder();
-
       int numFns = tabDef.getWindowFunctions().size();
       fnOutputs = new ArrayList[numFns];
 

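getRange now dispatches on the number of order expressions: exactly one keeps the type-specific SingleValueBoundaryScanner, more than one selects the new MultiValueBoundaryScanner. In the multi-key case no distance is measured; computeStart and computeEnd simply widen the frame to the current row's peer group, scanning while isEqual holds over the whole tuple of sort keys. A small worked example over hypothetical rows ordered by (si, i):

  row 0: (256, 65543)
  row 1: (256, 65543)
  row 2: (256, 65549)
  row 3: (257, 65543)

With row 1 current and RANGE CURRENT ROW, computeStart scans back past row 0 (equal tuple) and returns 0; computeEnd scans forward, stops at row 2 (tuple differs) and returns 2, so the frame covers rows 0-1.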
http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/windowing_range_multiorder.q b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
new file mode 100644
index 0000000..d8ca4d6
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/windowing_range_multiorder.q
@@ -0,0 +1,34 @@
+drop table over10k;
+
+create table over10k(
+           t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+       row format delimited
+       fields terminated by '|';
+
+load data local inpath '../../data/files/over10k' into table over10k;
+
+select first_value(t) over ( partition by si order by i, b ) from over10k limit 100;
+
+select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100;
+
+select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100;
+
+select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100;
+
+select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100;
+
+select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100;
+
+select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100;
+
+select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100;
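
Note that range current row in the second query is the single-boundary form of range between current row and current row; with RANGE semantics that frame is the current row's whole peer group, so last_value may return a row other than the current one whenever rows tie on both i and f.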

http://git-wip-us.apache.org/repos/asf/hive/blob/0b574501/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out b/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out
new file mode 100644
index 0000000..9910883
--- /dev/null
+++ b/ql/src/test/results/clientpositive/windowing_range_multiorder.q.out
@@ -0,0 +1,910 @@
+PREHOOK: query: drop table over10k
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table over10k
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table over10k(
+           t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+       row format delimited
+       fields terminated by '|'
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@over10k
+POSTHOOK: query: create table over10k(
+           t tinyint,
+           si smallint,
+           i int,
+           b bigint,
+           f float,
+           d double,
+           bo boolean,
+           s string,
+           ts timestamp,
+           dec decimal(4,2),
+           bin binary)
+       row format delimited
+       fields terminated by '|'
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@over10k
+PREHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@over10k
+POSTHOOK: query: load data local inpath '../../data/files/over10k' into table over10k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@over10k
+PREHOOK: query: select first_value(t) over ( partition by si order by i, b ) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select first_value(t) over ( partition by si order by i, b ) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+51
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+48
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+47
+PREHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select last_value(i) over (partition by si, bo order by i, f desc range current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+65543
+65549
+65558
+65580
+65586
+65596
+65616
+65620
+65627
+65640
+65643
+65706
+65713
+65737
+65744
+65752
+65778
+65540
+65563
+65599
+65604
+65613
+65613
+65615
+65651
+65653
+65668
+65693
+65731
+65733
+65738
+65741
+65744
+65747
+65763
+65778
+65789
+65541
+65547
+65560
+65572
+65574
+65575
+65578
+65588
+65594
+65610
+65691
+65694
+65711
+65719
+65722
+65738
+65756
+65790
+65542
+65557
+65566
+65584
+65610
+65612
+65626
+65631
+65638
+65654
+65654
+65655
+65699
+65712
+65720
+65732
+65748
+65752
+65771
+65771
+65771
+65781
+65565
+65569
+65573
+65582
+65584
+65606
+65656
+65669
+65717
+65724
+65728
+65761
+65762
+65770
+65771
+65781
+65546
+65551
+65551
+65568
+65568
+65579
+65603
+PREHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select row_number() over (partition by si, bo order by i, f desc range between unbounded preceding and unbounded following) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+14
+15
+16
+1
+2
+3
+4
+5
+6
+7
+PREHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select s, si, i, avg(i) over (partition by s range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+alice allen	451	65662	65640.125
+alice allen	462	65545	65640.125
+alice allen	501	65720	65640.125
+alice allen	501	65670	65640.125
+alice allen	484	65600	65640.125
+alice allen	472	65609	65640.125
+alice allen	509	65758	65640.125
+alice allen	400	65557	65640.125
+alice brown	425	65570	65696.71428571429
+alice brown	376	65708	65696.71428571429
+alice brown	324	65569	65696.71428571429
+alice brown	302	65711	65696.71428571429
+alice brown	381	65704	65696.71428571429
+alice brown	452	65666	65696.71428571429
+alice brown	346	65696	65696.71428571429
+alice brown	471	65733	65696.71428571429
+alice brown	409	65667	65696.71428571429
+alice brown	399	65779	65696.71428571429
+alice brown	332	65781	65696.71428571429
+alice brown	337	65707	65696.71428571429
+alice brown	499	65790	65696.71428571429
+alice brown	492	65673	65696.71428571429
+alice carson	404	65710	65645.4
+alice carson	376	65576	65645.4
+alice carson	508	65545	65645.4
+alice carson	427	65559	65645.4
+alice carson	473	65565	65645.4
+alice carson	390	65747	65645.4
+alice carson	318	65695	65645.4
+alice carson	316	65559	65645.4
+alice carson	268	65713	65645.4
+alice carson	380	65785	65645.4
+alice davidson	298	65554	65648.5
+alice davidson	479	65631	65648.5
+alice davidson	445	65590	65648.5
+alice davidson	384	65676	65648.5
+alice davidson	408	65791	65648.5
+alice davidson	321	65677	65648.5
+alice davidson	448	65641	65648.5
+alice davidson	423	65740	65648.5
+alice davidson	270	65563	65648.5
+alice davidson	431	65677	65648.5
+alice davidson	487	65596	65648.5
+alice davidson	402	65544	65648.5
+alice davidson	272	65742	65648.5
+alice davidson	287	65747	65648.5
+alice davidson	328	65547	65648.5
+alice davidson	437	65690	65648.5
+alice davidson	308	65560	65648.5
+alice davidson	408	65707	65648.5
+alice ellison	405	65713	65669.13333333333
+alice ellison	490	65572	65669.13333333333
+alice ellison	354	65698	65669.13333333333
+alice ellison	331	65557	65669.13333333333
+alice ellison	313	65612	65669.13333333333
+alice ellison	296	65741	65669.13333333333
+alice ellison	403	65544	65669.13333333333
+alice ellison	482	65681	65669.13333333333
+alice ellison	320	65745	65669.13333333333
+alice ellison	274	65537	65669.13333333333
+alice ellison	256	65744	65669.13333333333
+alice ellison	355	65699	65669.13333333333
+alice ellison	343	65787	65669.13333333333
+alice ellison	335	65730	65669.13333333333
+alice ellison	374	65677	65669.13333333333
+alice falkner	342	65752	65695.76470588235
+alice falkner	280	65597	65695.76470588235
+alice falkner	393	65611	65695.76470588235
+alice falkner	389	65699	65695.76470588235
+alice falkner	345	65773	65695.76470588235
+alice falkner	500	65775	65695.76470588235
+alice falkner	323	65669	65695.76470588235
+alice falkner	393	65685	65695.76470588235
+alice falkner	339	65785	65695.76470588235
+alice falkner	382	65690	65695.76470588235
+alice falkner	371	65710	65695.76470588235
+alice falkner	481	65709	65695.76470588235
+alice falkner	311	65715	65695.76470588235
+alice falkner	477	65722	65695.76470588235
+alice falkner	382	65622	65695.76470588235
+alice falkner	455	65718	65695.76470588235
+alice falkner	452	65596	65695.76470588235
+alice garcia	388	65675	65688.76923076923
+alice garcia	366	65744	65688.76923076923
+alice garcia	331	65734	65688.76923076923
+alice garcia	299	65623	65688.76923076923
+alice garcia	379	65746	65688.76923076923
+alice garcia	486	65725	65688.76923076923
+alice garcia	427	65674	65688.76923076923
+alice garcia	263	65630	65688.76923076923
+alice garcia	459	65712	65688.76923076923
+alice garcia	446	65759	65688.76923076923
+alice garcia	325	65573	65688.76923076923
+alice garcia	309	65746	65688.76923076923
+alice garcia	446	65613	65688.76923076923
+alice hernandez	396	65545	65678.38888888889
+alice hernandez	336	65786	65678.38888888889
+alice hernandez	324	65720	65678.38888888889
+alice hernandez	270	65717	65678.38888888889
+alice hernandez	323	65727	65678.38888888889
+PREHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+alice allen	400	65557	65557.0
+alice allen	451	65662	65609.5
+alice allen	462	65545	65588.0
+alice allen	472	65609	65593.25
+alice allen	484	65600	65594.6
+alice allen	501	65670	65607.16666666667
+alice allen	501	65720	65623.28571428571
+alice allen	509	65758	65640.125
+alice brown	302	65711	65711.0
+alice brown	324	65569	65640.0
+alice brown	332	65781	65687.0
+alice brown	337	65707	65692.0
+alice brown	346	65696	65692.8
+alice brown	376	65708	65695.33333333333
+alice brown	381	65704	65696.57142857143
+alice brown	399	65779	65706.875
+alice brown	409	65667	65702.44444444444
+alice brown	425	65570	65689.2
+alice brown	452	65666	65687.09090909091
+alice brown	471	65733	65690.91666666667
+alice brown	492	65673	65689.53846153847
+alice brown	499	65790	65696.71428571429
+alice carson	268	65713	65713.0
+alice carson	316	65559	65636.0
+alice carson	318	65695	65655.66666666667
+alice carson	376	65576	65635.75
+alice carson	380	65785	65665.6
+alice carson	390	65747	65679.16666666667
+alice carson	404	65710	65683.57142857143
+alice carson	427	65559	65668.0
+alice carson	473	65565	65656.55555555556
+alice carson	508	65545	65645.4
+alice davidson	270	65563	65563.0
+alice davidson	272	65742	65652.5
+alice davidson	287	65747	65684.0
+alice davidson	298	65554	65651.5
+alice davidson	308	65560	65633.2
+alice davidson	321	65677	65640.5
+alice davidson	328	65547	65627.14285714286
+alice davidson	384	65676	65633.25
+alice davidson	402	65544	65623.33333333333
+alice davidson	408	65707	65631.7
+alice davidson	408	65791	65646.18181818182
+alice davidson	423	65740	65654.0
+alice davidson	431	65677	65655.76923076923
+alice davidson	437	65690	65658.21428571429
+alice davidson	445	65590	65653.66666666667
+alice davidson	448	65641	65652.875
+alice davidson	479	65631	65651.58823529411
+alice davidson	487	65596	65648.5
+alice ellison	256	65744	65744.0
+alice ellison	274	65537	65640.5
+alice ellison	296	65741	65674.0
+alice ellison	313	65612	65658.5
+alice ellison	320	65745	65675.8
+alice ellison	331	65557	65656.0
+alice ellison	335	65730	65666.57142857143
+alice ellison	343	65787	65681.625
+alice ellison	354	65698	65683.44444444444
+alice ellison	355	65699	65685.0
+alice ellison	374	65677	65684.27272727272
+alice ellison	403	65544	65672.58333333333
+alice ellison	405	65713	65675.69230769231
+alice ellison	482	65681	65676.07142857143
+alice ellison	490	65572	65669.13333333333
+alice falkner	280	65597	65597.0
+alice falkner	311	65715	65656.0
+alice falkner	323	65669	65660.33333333333
+alice falkner	339	65785	65691.5
+alice falkner	342	65752	65703.6
+alice falkner	345	65773	65715.16666666667
+alice falkner	371	65710	65714.42857142857
+alice falkner	382	65622	65702.875
+alice falkner	382	65690	65701.44444444444
+alice falkner	389	65699	65701.2
+alice falkner	393	65611	65693.0
+alice falkner	393	65685	65692.33333333333
+alice falkner	452	65596	65684.92307692308
+alice falkner	455	65718	65687.28571428571
+alice falkner	477	65722	65689.6
+alice falkner	481	65709	65690.8125
+alice falkner	500	65775	65695.76470588235
+alice garcia	263	65630	65630.0
+alice garcia	299	65623	65626.5
+alice garcia	309	65746	65666.33333333333
+alice garcia	325	65573	65643.0
+alice garcia	331	65734	65661.2
+alice garcia	366	65744	65675.0
+alice garcia	379	65746	65685.14285714286
+alice garcia	388	65675	65683.875
+alice garcia	427	65674	65682.77777777778
+alice garcia	446	65613	65675.8
+alice garcia	446	65759	65683.36363636363
+alice garcia	459	65712	65685.75
+alice garcia	486	65725	65688.76923076923
+alice hernandez	270	65717	65717.0
+alice hernandez	290	65685	65701.0
+alice hernandez	296	65569	65657.0
+alice hernandez	320	65700	65667.75
+alice hernandez	323	65727	65679.6
+PREHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select s, si, i, min(i) over (partition by s order by si, i range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+alice allen	400	65557	65557
+alice allen	451	65662	65557
+alice allen	462	65545	65545
+alice allen	472	65609	65545
+alice allen	484	65600	65545
+alice allen	501	65670	65545
+alice allen	501	65720	65545
+alice allen	509	65758	65545
+alice brown	302	65711	65711
+alice brown	324	65569	65569
+alice brown	332	65781	65569
+alice brown	337	65707	65569
+alice brown	346	65696	65569
+alice brown	376	65708	65569
+alice brown	381	65704	65569
+alice brown	399	65779	65569
+alice brown	409	65667	65569
+alice brown	425	65570	65569
+alice brown	452	65666	65569
+alice brown	471	65733	65569
+alice brown	492	65673	65569
+alice brown	499	65790	65569
+alice carson	268	65713	65713
+alice carson	316	65559	65559
+alice carson	318	65695	65559
+alice carson	376	65576	65559
+alice carson	380	65785	65559
+alice carson	390	65747	65559
+alice carson	404	65710	65559
+alice carson	427	65559	65559
+alice carson	473	65565	65559
+alice carson	508	65545	65545
+alice davidson	270	65563	65563
+alice davidson	272	65742	65563
+alice davidson	287	65747	65563
+alice davidson	298	65554	65554
+alice davidson	308	65560	65554
+alice davidson	321	65677	65554
+alice davidson	328	65547	65547
+alice davidson	384	65676	65547
+alice davidson	402	65544	65544
+alice davidson	408	65707	65544
+alice davidson	408	65791	65544
+alice davidson	423	65740	65544
+alice davidson	431	65677	65544
+alice davidson	437	65690	65544
+alice davidson	445	65590	65544
+alice davidson	448	65641	65544
+alice davidson	479	65631	65544
+alice davidson	487	65596	65544
+alice ellison	256	65744	65744
+alice ellison	274	65537	65537
+alice ellison	296	65741	65537
+alice ellison	313	65612	65537
+alice ellison	320	65745	65537
+alice ellison	331	65557	65537
+alice ellison	335	65730	65537
+alice ellison	343	65787	65537
+alice ellison	354	65698	65537
+alice ellison	355	65699	65537
+alice ellison	374	65677	65537
+alice ellison	403	65544	65537
+alice ellison	405	65713	65537
+alice ellison	482	65681	65537
+alice ellison	490	65572	65537
+alice falkner	280	65597	65597
+alice falkner	311	65715	65597
+alice falkner	323	65669	65597
+alice falkner	339	65785	65597
+alice falkner	342	65752	65597
+alice falkner	345	65773	65597
+alice falkner	371	65710	65597
+alice falkner	382	65622	65597
+alice falkner	382	65690	65597
+alice falkner	389	65699	65597
+alice falkner	393	65611	65597
+alice falkner	393	65685	65597
+alice falkner	452	65596	65596
+alice falkner	455	65718	65596
+alice falkner	477	65722	65596
+alice falkner	481	65709	65596
+alice falkner	500	65775	65596
+alice garcia	263	65630	65630
+alice garcia	299	65623	65623
+alice garcia	309	65746	65623
+alice garcia	325	65573	65573
+alice garcia	331	65734	65573
+alice garcia	366	65744	65573
+alice garcia	379	65746	65573
+alice garcia	388	65675	65573
+alice garcia	427	65674	65573
+alice garcia	446	65613	65573
+alice garcia	446	65759	65573
+alice garcia	459	65712	65573
+alice garcia	486	65725	65573
+alice hernandez	270	65717	65717
+alice hernandez	290	65685	65685
+alice hernandez	296	65569	65569
+alice hernandez	320	65700	65569
+alice hernandez	323	65727	65569
+PREHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select s, si, i, avg(i) over (partition by s order by si, i desc range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+alice allen	400	65557	65557.0
+alice allen	451	65662	65609.5
+alice allen	462	65545	65588.0
+alice allen	472	65609	65593.25
+alice allen	484	65600	65594.6
+alice allen	501	65720	65615.5
+alice allen	501	65670	65623.28571428571
+alice allen	509	65758	65640.125
+alice brown	302	65711	65711.0
+alice brown	324	65569	65640.0
+alice brown	332	65781	65687.0
+alice brown	337	65707	65692.0
+alice brown	346	65696	65692.8
+alice brown	376	65708	65695.33333333333
+alice brown	381	65704	65696.57142857143
+alice brown	399	65779	65706.875
+alice brown	409	65667	65702.44444444444
+alice brown	425	65570	65689.2
+alice brown	452	65666	65687.09090909091
+alice brown	471	65733	65690.91666666667
+alice brown	492	65673	65689.53846153847
+alice brown	499	65790	65696.71428571429
+alice carson	268	65713	65713.0
+alice carson	316	65559	65636.0
+alice carson	318	65695	65655.66666666667
+alice carson	376	65576	65635.75
+alice carson	380	65785	65665.6
+alice carson	390	65747	65679.16666666667
+alice carson	404	65710	65683.57142857143
+alice carson	427	65559	65668.0
+alice carson	473	65565	65656.55555555556
+alice carson	508	65545	65645.4
+alice davidson	270	65563	65563.0
+alice davidson	272	65742	65652.5
+alice davidson	287	65747	65684.0
+alice davidson	298	65554	65651.5
+alice davidson	308	65560	65633.2
+alice davidson	321	65677	65640.5
+alice davidson	328	65547	65627.14285714286
+alice davidson	384	65676	65633.25
+alice davidson	402	65544	65623.33333333333
+alice davidson	408	65791	65640.1
+alice davidson	408	65707	65646.18181818182
+alice davidson	423	65740	65654.0
+alice davidson	431	65677	65655.76923076923
+alice davidson	437	65690	65658.21428571429
+alice davidson	445	65590	65653.66666666667
+alice davidson	448	65641	65652.875
+alice davidson	479	65631	65651.58823529411
+alice davidson	487	65596	65648.5
+alice ellison	256	65744	65744.0
+alice ellison	274	65537	65640.5
+alice ellison	296	65741	65674.0
+alice ellison	313	65612	65658.5
+alice ellison	320	65745	65675.8
+alice ellison	331	65557	65656.0
+alice ellison	335	65730	65666.57142857143
+alice ellison	343	65787	65681.625
+alice ellison	354	65698	65683.44444444444
+alice ellison	355	65699	65685.0
+alice ellison	374	65677	65684.27272727272
+alice ellison	403	65544	65672.58333333333
+alice ellison	405	65713	65675.69230769231
+alice ellison	482	65681	65676.07142857143
+alice ellison	490	65572	65669.13333333333
+alice falkner	280	65597	65597.0
+alice falkner	311	65715	65656.0
+alice falkner	323	65669	65660.33333333333
+alice falkner	339	65785	65691.5
+alice falkner	342	65752	65703.6
+alice falkner	345	65773	65715.16666666667
+alice falkner	371	65710	65714.42857142857
+alice falkner	382	65690	65711.375
+alice falkner	382	65622	65701.44444444444
+alice falkner	389	65699	65701.2
+alice falkner	393	65685	65699.72727272728
+alice falkner	393	65611	65692.33333333333
+alice falkner	452	65596	65684.92307692308
+alice falkner	455	65718	65687.28571428571
+alice falkner	477	65722	65689.6
+alice falkner	481	65709	65690.8125
+alice falkner	500	65775	65695.76470588235
+alice garcia	263	65630	65630.0
+alice garcia	299	65623	65626.5
+alice garcia	309	65746	65666.33333333333
+alice garcia	325	65573	65643.0
+alice garcia	331	65734	65661.2
+alice garcia	366	65744	65675.0
+alice garcia	379	65746	65685.14285714286
+alice garcia	388	65675	65683.875
+alice garcia	427	65674	65682.77777777778
+alice garcia	446	65759	65690.4
+alice garcia	446	65613	65683.36363636363
+alice garcia	459	65712	65685.75
+alice garcia	486	65725	65688.76923076923
+alice hernandez	270	65717	65717.0
+alice hernandez	290	65685	65701.0
+alice hernandez	296	65569	65657.0
+alice hernandez	320	65700	65667.75
+alice hernandez	323	65727	65679.6
+PREHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@over10k
+#### A masked pattern was here ####
+POSTHOOK: query: select si, bo, i, f, max(i) over (partition by si, bo order by i, f desc range between unbounded preceding and current row) from over10k limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@over10k
+#### A masked pattern was here ####
+256	false	65543	32.21	65543
+256	false	65549	23.72	65549
+256	false	65558	71.32	65558
+256	false	65580	64.81	65580
+256	false	65586	12.97	65586
+256	false	65596	5.35	65596
+256	false	65616	76.38	65616
+256	false	65620	51.72	65620
+256	false	65627	54.23	65627
+256	false	65640	32.64	65640
+256	false	65643	94.05	65643
+256	false	65706	83.67	65706
+256	false	65713	21.83	65713
+256	false	65737	3.38	65737
+256	false	65744	47.17	65744
+256	false	65752	61.21	65752
+256	false	65778	16.29	65778
+256	true	65540	49.44	65540
+256	true	65563	94.87	65563
+256	true	65599	89.55	65599
+256	true	65604	40.97	65604
+256	true	65613	93.29	65613
+256	true	65613	78.27	65613
+256	true	65615	20.66	65615
+256	true	65651	90.32	65651
+256	true	65653	8.1	65653
+256	true	65668	92.71	65668
+256	true	65693	62.52	65693
+256	true	65731	34.09	65731
+256	true	65733	70.53	65733
+256	true	65738	9.0	65738
+256	true	65741	54.8	65741
+256	true	65744	38.16	65744
+256	true	65747	32.18	65747
+256	true	65763	24.89	65763
+256	true	65778	74.15	65778
+256	true	65789	91.12	65789
+257	false	65541	51.26	65541
+257	false	65547	54.01	65547
+257	false	65560	42.14	65560
+257	false	65572	79.15	65572
+257	false	65574	19.96	65574
+257	false	65575	1.21	65575
+257	false	65578	61.6	65578
+257	false	65588	81.17	65588
+257	false	65594	78.39	65594
+257	false	65610	98.0	65610
+257	false	65691	80.76	65691
+257	false	65694	29.0	65694
+257	false	65711	60.88	65711
+257	false	65719	62.79	65719
+257	false	65722	79.05	65722
+257	false	65738	96.01	65738
+257	false	65756	24.44	65756
+257	false	65790	9.26	65790
+257	true	65542	62.59	65542
+257	true	65557	55.07	65557
+257	true	65566	68.54	65566
+257	true	65584	35.88	65584
+257	true	65610	47.58	65610
+257	true	65612	3.12	65612
+257	true	65626	23.18	65626
+257	true	65631	51.61	65631
+257	true	65638	95.35	65638
+257	true	65654	24.54	65654
+257	true	65654	9.8	65654
+257	true	65655	40.42	65655
+257	true	65699	15.36	65699
+257	true	65712	90.44	65712
+257	true	65720	24.4	65720
+257	true	65732	96.85	65732
+257	true	65748	32.52	65748
+257	true	65752	49.35	65752
+257	true	65771	95.58	65771
+257	true	65771	53.89	65771
+257	true	65771	48.5	65771
+257	true	65781	17.33	65781
+258	false	65565	98.19	65565
+258	false	65569	66.81	65569
+258	false	65573	31.45	65573
+258	false	65582	67.28	65582
+258	false	65584	64.92	65584
+258	false	65606	35.52	65606
+258	false	65656	79.17	65656
+258	false	65669	75.01	65669
+258	false	65717	95.76	65717
+258	false	65724	70.0	65724
+258	false	65728	9.05	65728
+258	false	65761	33.73	65761
+258	false	65762	15.22	65762
+258	false	65770	13.38	65770
+258	false	65771	52.63	65771
+258	false	65781	1.92	65781
+258	true	65546	91.19	65546
+258	true	65551	91.56	65551
+258	true	65551	88.97	65551
+258	true	65568	81.41	65568
+258	true	65568	13.57	65568
+258	true	65579	47.52	65579
+258	true	65603	2.61	65603
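
The avg(i) window shown earlier in this output is a cumulative mean: RANGE
BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW averages every row from the start
of the s-partition through the current row (RANGE additionally groups ORDER BY
peers, which only matters when si and i tie). A minimal sketch of the
arithmetic, not Hive code, using the first three "alice allen" rows shown
above:

  public class RunningAvg {
    public static void main(String[] args) {
      int[] i = {65557, 65662, 65545};  // i values, already in window order
      long sum = 0;
      for (int n = 0; n < i.length; n++) {
        sum += i[n];
        System.out.println((double) sum / (n + 1));  // 65557.0, 65609.5, 65588.0
      }
    }
  }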


[50/51] [abbrv] hive git commit: HIVE-13285: Orc concatenation may drop old files instead of moving them to the final path (Prasanth Jayachandran reviewed by Gopal V)

Posted by jd...@apache.org.
HIVE-13285: Orc concatenation may drop old files instead of moving them to the final path (Prasanth Jayachandran reviewed by Gopal V)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a17122f4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a17122f4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a17122f4

Branch: refs/heads/llap
Commit: a17122f45daf391454a73895bb651d97f6e6fdc8
Parents: 0b57450
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Wed Mar 16 14:01:52 2016 -0700
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Wed Mar 16 14:01:52 2016 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |  1 +
 .../hive/ql/exec/AbstractFileMergeOperator.java | 23 ++++---
 .../hive/ql/exec/OrcFileMergeOperator.java      | 14 ++--
 .../clientpositive/orc_merge_incompat3.q        | 14 ++++
 .../clientpositive/orc_merge_incompat3.q.out    | 70 ++++++++++++++++++++
 .../tez/orc_merge_incompat3.q.out               | 70 ++++++++++++++++++++
 6 files changed, 176 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index f991d49..39ba628 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -176,6 +176,7 @@ minitez.query.files.shared=acid_globallimit.q,\
   orc_merge11.q,\
   orc_merge_incompat1.q,\
   orc_merge_incompat2.q,\
+  orc_merge_incompat3.q,\
   orc_vectorization_ppd.q,\
   parallel.q,\
   ptf.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
index f99bf11..154a78b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java
@@ -208,18 +208,23 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc>
   @Override
   public void closeOp(boolean abort) throws HiveException {
     try {
-      if (!exception) {
-        FileStatus fss = fs.getFileStatus(outPath);
-        if (!fs.rename(outPath, finalPath)) {
-          throw new IOException(
-              "Unable to rename " + outPath + " to " + finalPath);
+      if (!abort) {
+        // if outPath does not exist, then it means all paths within combine split are skipped as
+        // they are incompatible for merge (for example: files without stripe stats).
+        // Those files will be added to incompatFileSet
+        if (fs.exists(outPath)) {
+          FileStatus fss = fs.getFileStatus(outPath);
+          if (!fs.rename(outPath, finalPath)) {
+            throw new IOException(
+                "Unable to rename " + outPath + " to " + finalPath);
+          }
+          LOG.info("renamed path " + outPath + " to " + finalPath + " . File" +
+              " size is "
+              + fss.getLen());
         }
-        LOG.info("renamed path " + outPath + " to " + finalPath + " . File" +
-            " size is "
-            + fss.getLen());
 
         // move any incompatible files to final path
-        if (!incompatFileSet.isEmpty()) {
+        if (incompatFileSet != null && !incompatFileSet.isEmpty()) {
           for (Path incompatFile : incompatFileSet) {
             Path destDir = finalPath.getParent();
             try {
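
The guarded rename above is the heart of the fix: a merge task may now
legitimately produce no merged output at all, because every file in the
combine split was skipped as incompatible. A condensed sketch of the pattern,
assuming the usual org.apache.hadoop.fs types and an slf4j LOG (abort handling
and the incompatible-file moves are trimmed):

  // Condensed from AbstractFileMergeOperator.closeOp above; a sketch,
  // not the full implementation.
  void finishMerge(FileSystem fs, Path outPath, Path finalPath) throws IOException {
    if (!fs.exists(outPath)) {
      // No merged output was written: every input was incompatible and
      // will be moved verbatim via incompatFileSet instead.
      return;
    }
    FileStatus fss = fs.getFileStatus(outPath);
    if (!fs.rename(outPath, finalPath)) {
      throw new IOException("Unable to rename " + outPath + " to " + finalPath);
    }
    LOG.info("renamed path " + outPath + " to " + finalPath + ". File size is " + fss.getLen());
  }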

http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
index 445cf3d..e554ab1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java
@@ -231,22 +231,22 @@ public class OrcFileMergeOperator extends
 
   @Override
   public void closeOp(boolean abort) throws HiveException {
-    // close writer
-    if (outWriter == null) {
-      return;
-    }
-
     try {
       if (fdis != null) {
         fdis.close();
         fdis = null;
       }
 
-      outWriter.close();
-      outWriter = null;
+      if (outWriter != null) {
+        outWriter.close();
+        outWriter = null;
+      }
     } catch (Exception e) {
       throw new HiveException("Unable to close OrcFileMergeOperator", e);
     }
+
+    // When there are no exceptions, this has to be called always to make sure incompatible files
+    // are moved properly to the destination path
     super.closeOp(abort);
   }
 }
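
Read together, the two closeOp changes enforce one invariant: per-task streams
are closed only if they were opened, and super.closeOp(abort) runs whenever no
exception intervenes, so incompatible files reach the destination even when
nothing was merged. A minimal sketch of the method as it reads after the diff:

  // Sketch of OrcFileMergeOperator.closeOp after the change: close
  // whatever was opened, then delegate to the parent, which performs
  // the rename and the incompatible-file moves.
  @Override
  public void closeOp(boolean abort) throws HiveException {
    try {
      if (fdis != null) { fdis.close(); fdis = null; }
      if (outWriter != null) { outWriter.close(); outWriter = null; }
    } catch (Exception e) {
      throw new HiveException("Unable to close OrcFileMergeOperator", e);
    }
    super.closeOp(abort);
  }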

http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/ql/src/test/queries/clientpositive/orc_merge_incompat3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat3.q b/ql/src/test/queries/clientpositive/orc_merge_incompat3.q
new file mode 100644
index 0000000..d6be111
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_merge_incompat3.q
@@ -0,0 +1,14 @@
+create table concat_incompat like alltypesorc;
+
+load data local inpath '../../data/files/alltypesorc' into table concat_incompat;
+load data local inpath '../../data/files/alltypesorc' into table concat_incompat;
+load data local inpath '../../data/files/alltypesorc' into table concat_incompat;
+load data local inpath '../../data/files/alltypesorc' into table concat_incompat;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/concat_incompat/;
+select count(*) from concat_incompat;
+
+ALTER TABLE concat_incompat CONCATENATE;
+
+dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/concat_incompat/;
+select count(*) from concat_incompat;

http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/ql/src/test/results/clientpositive/orc_merge_incompat3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat3.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat3.q.out
new file mode 100644
index 0000000..e34492b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/orc_merge_incompat3.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create table concat_incompat like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: create table concat_incompat like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from concat_incompat
+PREHOOK: type: QUERY
+PREHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from concat_incompat
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+49152
+PREHOOK: query: ALTER TABLE concat_incompat CONCATENATE
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@concat_incompat
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: ALTER TABLE concat_incompat CONCATENATE
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@concat_incompat
+POSTHOOK: Output: default@concat_incompat
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from concat_incompat
+PREHOOK: type: QUERY
+PREHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from concat_incompat
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+49152

http://git-wip-us.apache.org/repos/asf/hive/blob/a17122f4/ql/src/test/results/clientpositive/tez/orc_merge_incompat3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge_incompat3.q.out b/ql/src/test/results/clientpositive/tez/orc_merge_incompat3.q.out
new file mode 100644
index 0000000..e34492b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/orc_merge_incompat3.q.out
@@ -0,0 +1,70 @@
+PREHOOK: query: create table concat_incompat like alltypesorc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: create table concat_incompat like alltypesorc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+PREHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: load data local inpath '../../data/files/alltypesorc' into table concat_incompat
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@concat_incompat
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from concat_incompat
+PREHOOK: type: QUERY
+PREHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from concat_incompat
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+49152
+PREHOOK: query: ALTER TABLE concat_incompat CONCATENATE
+PREHOOK: type: ALTER_TABLE_MERGE
+PREHOOK: Input: default@concat_incompat
+PREHOOK: Output: default@concat_incompat
+POSTHOOK: query: ALTER TABLE concat_incompat CONCATENATE
+POSTHOOK: type: ALTER_TABLE_MERGE
+POSTHOOK: Input: default@concat_incompat
+POSTHOOK: Output: default@concat_incompat
+Found 4 items
+#### A masked pattern was here ####
+PREHOOK: query: select count(*) from concat_incompat
+PREHOOK: type: QUERY
+PREHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from concat_incompat
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@concat_incompat
+#### A masked pattern was here ####
+49152


[41/51] [abbrv] hive git commit: HIVE-13243: Hive drop table on encryption zone fails for external tables (Chaoyu Tang, reviewed by Sergio Pena)

Posted by jd...@apache.org.
HIVE-13243: Hive drop table on encryption zone fails for external tables (Chaoyu Tang, reviewed by Sergio Pena)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d5b1adbb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d5b1adbb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d5b1adbb

Branch: refs/heads/llap
Commit: d5b1adbb6638acd74fd4162a6ed408ebd72abeae
Parents: f87b2b6
Author: ctang <ct...@cloudera.com>
Authored: Tue Mar 15 11:41:41 2016 -0400
Committer: ctang <ct...@cloudera.com>
Committed: Tue Mar 15 11:41:41 2016 -0400

----------------------------------------------------------------------
 .../hadoop/hive/metastore/HiveMetaStore.java    | 34 ++++++---
 .../clientpositive/encryption_drop_partition.q  | 10 +++
 .../clientpositive/encryption_drop_table.q      |  9 ++-
 .../encrypted/encryption_drop_partition.q.out   | 76 ++++++++++++++++++++
 .../encrypted/encryption_drop_table.q.out       | 27 +++++++
 5 files changed, 144 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d5b1adbb/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index f0bc560..2fa0e9a 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1508,8 +1508,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
         }
 
-        // tblPath will be null when tbl is a view. We skip the following if block in that case.
-        checkTrashPurgeCombination(tblPath, dbname + "." + name, ifPurge);
+        checkTrashPurgeCombination(tblPath, dbname + "." + name, ifPurge, deleteData && !isExternal);
         // Drop the partitions and get a list of locations which need to be deleted
         partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath,
             tbl.getPartitionKeys(), deleteData && !isExternal);
@@ -1546,15 +1545,20 @@ public class HiveMetaStore extends ThriftHiveMetastore {
      * @param objectName db.table, or db.table.part
      * @param ifPurge if PURGE options is specified
      */
-    private void checkTrashPurgeCombination(Path pathToData, String objectName, boolean ifPurge)
-      throws MetaException {
-      if (!(pathToData != null && !ifPurge)) {//pathToData may be NULL for a view
+    private void checkTrashPurgeCombination(Path pathToData, String objectName, boolean ifPurge,
+        boolean deleteData) throws MetaException {
+      // There is no need to check TrashPurgeCombination in following cases since Purge/Trash
+      // is not applicable:
+      // a) deleteData is false -- drop an external table
+      // b) pathToData is null -- a view
+      // c) ifPurge is true -- force delete without Trash
+      if (!deleteData || pathToData == null || ifPurge) {
         return;
       }
 
       boolean trashEnabled = false;
       try {
-  trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1);
+        trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1);
       } catch(NumberFormatException ex) {
   // nothing to do
       }
@@ -2644,11 +2648,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean isArchived = false;
       Path archiveParentDir = null;
       boolean mustPurge = false;
+      boolean isExternalTbl = false;
 
       try {
         ms.openTransaction();
         part = ms.getPartition(db_name, tbl_name, part_vals);
         tbl = get_table_core(db_name, tbl_name);
+        isExternalTbl = isExternal(tbl);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
         mustPurge = isMustPurge(envContext, tbl);
 
@@ -2661,7 +2667,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if (isArchived) {
           archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
           verifyIsWritablePath(archiveParentDir);
-          checkTrashPurgeCombination(archiveParentDir, db_name + "." + tbl_name + "." + part_vals, mustPurge);
+          checkTrashPurgeCombination(archiveParentDir, db_name + "." + tbl_name + "." + part_vals,
+              mustPurge, deleteData && !isExternalTbl);
         }
         if (!ms.dropPartition(db_name, tbl_name, part_vals)) {
           throw new MetaException("Unable to drop partition");
@@ -2670,13 +2677,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
           partPath = new Path(part.getSd().getLocation());
           verifyIsWritablePath(partPath);
-          checkTrashPurgeCombination(partPath, db_name + "." + tbl_name + "." + part_vals, mustPurge);
+          checkTrashPurgeCombination(partPath, db_name + "." + tbl_name + "." + part_vals,
+              mustPurge, deleteData && !isExternalTbl);
         }
       } finally {
         if (!success) {
           ms.rollbackTransaction();
         } else if (deleteData && ((partPath != null) || (archiveParentDir != null))) {
-          if (tbl != null && !isExternal(tbl)) {
+          if (!isExternalTbl) {
             if (mustPurge) {
               LOG.info("dropPartition() will purge " + partPath + " directly, skipping trash.");
             }
@@ -2761,10 +2769,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Table tbl = null;
       List<Partition> parts = null;
       boolean mustPurge = false;
+      boolean isExternalTbl = false;
       try {
         // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
         // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
         tbl = get_table_core(dbName, tblName);
+        isExternalTbl = isExternal(tbl);
         mustPurge = isMustPurge(envContext, tbl);
         int minCount = 0;
         RequestPartsSpec spec = request.getParts();
@@ -2829,13 +2839,15 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           if (MetaStoreUtils.isArchived(part)) {
             Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part);
             verifyIsWritablePath(archiveParentDir);
-            checkTrashPurgeCombination(archiveParentDir, dbName + "." + tblName + "." + part.getValues(), mustPurge);
+            checkTrashPurgeCombination(archiveParentDir, dbName + "." + tblName + "." +
+                part.getValues(), mustPurge, deleteData && !isExternalTbl);
             archToDelete.add(archiveParentDir);
           }
           if ((part.getSd() != null) && (part.getSd().getLocation() != null)) {
             Path partPath = new Path(part.getSd().getLocation());
             verifyIsWritablePath(partPath);
-            checkTrashPurgeCombination(partPath, dbName + "." + tblName + "." + part.getValues(), mustPurge);
+            checkTrashPurgeCombination(partPath, dbName + "." + tblName + "." + part.getValues(),
+                mustPurge, deleteData && !isExternalTbl);
             dirsToDelete.add(new PathAndPartValSize(partPath, part.getValues().size()));
           }
         }
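
Across all three call sites the new deleteData argument means the same thing:
data will actually be deleted only for managed tables. A condensed sketch of
the resulting decision in checkTrashPurgeCombination (the trash probe is as in
the diff; the pre-existing check that follows it is not shown in the hunk and
is trimmed here):

  // Sketch: skip the trash/purge check whenever trash handling cannot
  // apply; otherwise probe whether trash is enabled before deciding how
  // a non-PURGE drop of the data should proceed.
  private void checkTrashPurgeCombination(Path pathToData, String objectName,
      boolean ifPurge, boolean deleteData) throws MetaException {
    if (!deleteData            // (a) external table: files are left in place
        || pathToData == null  // (b) view: nothing on the filesystem
        || ifPurge) {          // (c) PURGE: trash is bypassed by design
      return;
    }
    boolean trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1);
    // ... remaining checks against trashEnabled follow, unchanged ...
  }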

http://git-wip-us.apache.org/repos/asf/hive/blob/d5b1adbb/ql/src/test/queries/clientpositive/encryption_drop_partition.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_drop_partition.q b/ql/src/test/queries/clientpositive/encryption_drop_partition.q
index a26aa19..57dfabd 100644
--- a/ql/src/test/queries/clientpositive/encryption_drop_partition.q
+++ b/ql/src/test/queries/clientpositive/encryption_drop_partition.q
@@ -12,7 +12,17 @@ CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.
 
 INSERT INTO encrypted_table_dp PARTITION(p)(p,key,value) values('2014-09-23', 1, 'foo'),('2014-09-24', 2, 'bar');
 SELECT * FROM encrypted_table_dp;
+
+CREATE EXTERNAL TABLE encrypted_ext_table_dp (key INT, value STRING) partitioned by (p STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table_dp';
+ALTER TABLE encrypted_ext_table_dp ADD PARTITION (p='2014-09-23') LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table_dp/p=2014-09-23';
+SELECT * FROM encrypted_ext_table_dp;
+ALTER TABLE encrypted_ext_table_dp DROP PARTITION (p='2014-09-23');
+SELECT * FROM encrypted_ext_table_dp;
+DROP TABLE encrypted_ext_table_dp;
+
+SELECT * FROM encrypted_table_dp;
 ALTER TABLE encrypted_table_dp DROP PARTITION (p='2014-09-23');
 SELECT * FROM encrypted_table_dp;
 ALTER TABLE encrypted_table_dp DROP PARTITION (p='2014-09-23') PURGE;
 SELECT * FROM encrypted_table_dp;
+DROP TABLE encrypted_table_dp PURGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/d5b1adbb/ql/src/test/queries/clientpositive/encryption_drop_table.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/encryption_drop_table.q b/ql/src/test/queries/clientpositive/encryption_drop_table.q
index 193beea..2ae3c69 100644
--- a/ql/src/test/queries/clientpositive/encryption_drop_table.q
+++ b/ql/src/test/queries/clientpositive/encryption_drop_table.q
@@ -10,9 +10,16 @@ CRYPTO CREATE_KEY --keyName key_128 --bitLength 128;
 CRYPTO CREATE_ZONE --keyName key_128 --path ${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table;
 
 INSERT OVERWRITE TABLE encrypted_table SELECT * FROM src;
+
+CREATE EXTERNAL TABLE encrypted_ext_table (key INT, value STRING) LOCATION '${hiveconf:hive.metastore.warehouse.dir}/default/encrypted_table';
+SHOW TABLES;
+
+DROP TABLE default.encrypted_ext_table;
 SHOW TABLES;
+
 DROP TABLE default.encrypted_table;
 SHOW TABLES;
+
 DROP TABLE default.encrypted_table PURGE;
 SHOW TABLES;
-CRYPTO DELETE_KEY --keyName key_128;
\ No newline at end of file
+CRYPTO DELETE_KEY --keyName key_128;

http://git-wip-us.apache.org/repos/asf/hive/blob/d5b1adbb/ql/src/test/results/clientpositive/encrypted/encryption_drop_partition.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_drop_partition.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_drop_partition.q.out
index 067bf82..2643006 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_drop_partition.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_drop_partition.q.out
@@ -41,6 +41,74 @@ POSTHOOK: Input: default@encrypted_table_dp@p=2014-09-24
 #### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
 1	foo	2014-09-23
 2	bar	2014-09-24
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@encrypted_ext_table_dp
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@encrypted_ext_table_dp
+#### A masked pattern was here ####
+PREHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+PREHOOK: Output: default@encrypted_ext_table_dp
+#### A masked pattern was here ####
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+#### A masked pattern was here ####
+POSTHOOK: Output: default@encrypted_ext_table_dp
+POSTHOOK: Output: default@encrypted_ext_table_dp@p=2014-09-23
+PREHOOK: query: SELECT * FROM encrypted_ext_table_dp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@encrypted_ext_table_dp
+PREHOOK: Input: default@encrypted_ext_table_dp@p=2014-09-23
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+POSTHOOK: query: SELECT * FROM encrypted_ext_table_dp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@encrypted_ext_table_dp
+POSTHOOK: Input: default@encrypted_ext_table_dp@p=2014-09-23
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+1	foo	2014-09-23
+PREHOOK: query: ALTER TABLE encrypted_ext_table_dp DROP PARTITION (p='2014-09-23')
+PREHOOK: type: ALTERTABLE_DROPPARTS
+PREHOOK: Input: default@encrypted_ext_table_dp
+PREHOOK: Output: default@encrypted_ext_table_dp@p=2014-09-23
+POSTHOOK: query: ALTER TABLE encrypted_ext_table_dp DROP PARTITION (p='2014-09-23')
+POSTHOOK: type: ALTERTABLE_DROPPARTS
+POSTHOOK: Input: default@encrypted_ext_table_dp
+POSTHOOK: Output: default@encrypted_ext_table_dp@p=2014-09-23
+PREHOOK: query: SELECT * FROM encrypted_ext_table_dp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@encrypted_ext_table_dp
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+POSTHOOK: query: SELECT * FROM encrypted_ext_table_dp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@encrypted_ext_table_dp
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+PREHOOK: query: DROP TABLE encrypted_ext_table_dp
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@encrypted_ext_table_dp
+PREHOOK: Output: default@encrypted_ext_table_dp
+POSTHOOK: query: DROP TABLE encrypted_ext_table_dp
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@encrypted_ext_table_dp
+POSTHOOK: Output: default@encrypted_ext_table_dp
+PREHOOK: query: SELECT * FROM encrypted_table_dp
+PREHOOK: type: QUERY
+PREHOOK: Input: default@encrypted_table_dp
+PREHOOK: Input: default@encrypted_table_dp@p=2014-09-23
+PREHOOK: Input: default@encrypted_table_dp@p=2014-09-24
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+POSTHOOK: query: SELECT * FROM encrypted_table_dp
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@encrypted_table_dp
+POSTHOOK: Input: default@encrypted_table_dp@p=2014-09-23
+POSTHOOK: Input: default@encrypted_table_dp@p=2014-09-24
+#### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
+1	foo	2014-09-23
+2	bar	2014-09-24
 PREHOOK: query: ALTER TABLE encrypted_table_dp DROP PARTITION (p='2014-09-23')
 PREHOOK: type: ALTERTABLE_DROPPARTS
 PREHOOK: Input: default@encrypted_table_dp
@@ -79,3 +147,11 @@ POSTHOOK: Input: default@encrypted_table_dp
 POSTHOOK: Input: default@encrypted_table_dp@p=2014-09-24
 #### A PARTIAL masked pattern was here #### data/warehouse/default/encrypted_table_dp/.hive-staging
 2	bar	2014-09-24
+PREHOOK: query: DROP TABLE encrypted_table_dp PURGE
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@encrypted_table_dp
+PREHOOK: Output: default@encrypted_table_dp
+POSTHOOK: query: DROP TABLE encrypted_table_dp PURGE
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@encrypted_table_dp
+POSTHOOK: Output: default@encrypted_table_dp

http://git-wip-us.apache.org/repos/asf/hive/blob/d5b1adbb/ql/src/test/results/clientpositive/encrypted/encryption_drop_table.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_drop_table.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_drop_table.q.out
index 55eefa0..c5007ee 100644
--- a/ql/src/test/results/clientpositive/encrypted/encryption_drop_table.q.out
+++ b/ql/src/test/results/clientpositive/encrypted/encryption_drop_table.q.out
@@ -24,6 +24,33 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@encrypted_table
 POSTHOOK: Lineage: encrypted_table.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: encrypted_table.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+#### A masked pattern was here ####
+PREHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+PREHOOK: Output: database:default
+PREHOOK: Output: default@encrypted_ext_table
+#### A masked pattern was here ####
+POSTHOOK: type: CREATETABLE
+#### A masked pattern was here ####
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@encrypted_ext_table
+PREHOOK: query: SHOW TABLES
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: SHOW TABLES
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+encrypted_ext_table
+encrypted_table
+src
+PREHOOK: query: DROP TABLE default.encrypted_ext_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@encrypted_ext_table
+PREHOOK: Output: default@encrypted_ext_table
+POSTHOOK: query: DROP TABLE default.encrypted_ext_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@encrypted_ext_table
+POSTHOOK: Output: default@encrypted_ext_table
 PREHOOK: query: SHOW TABLES
 PREHOOK: type: SHOWTABLES
 PREHOOK: Input: database:default


[40/51] [abbrv] hive git commit: HIVE-13233: Use min and max values to estimate better stats for comparison operators (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13233: Use min and max values to estimate better stats for comparison operators (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f87b2b63
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f87b2b63
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f87b2b63

Branch: refs/heads/llap
Commit: f87b2b6372fdf0b8e386a8364b49429e2ce7bad2
Parents: 4cd1101
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Thu Mar 10 19:14:58 2016 +0100
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Tue Mar 15 10:42:10 2016 +0100

----------------------------------------------------------------------
 .../stats/annotation/StatsRulesProcFactory.java | 184 ++++++++++++++++++-
 .../apache/hadoop/hive/ql/stats/StatsUtils.java |   2 +
 .../clientpositive/annotate_stats_filter.q      |  12 +-
 .../clientpositive/annotate_stats_filter.q.out  | 176 ++++++++++++++++--
 .../annotate_stats_join_pkfk.q.out              |  62 +++----
 .../clientpositive/annotate_stats_part.q.out    |  12 +-
 .../clientpositive/tez/explainuser_1.q.out      |  32 ++--
 7 files changed, 408 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
index f273d25..4bcf6bf 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
@@ -26,8 +26,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Stack;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
@@ -80,6 +78,8 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -500,6 +500,181 @@ public class StatsRulesProcFactory {
       return maxNoNulls;
     }
 
+    private long evaluateComparator(Statistics stats, ExprNodeGenericFuncDesc genFunc) {
+      long numRows = stats.getNumRows();
+      GenericUDF udf = genFunc.getGenericUDF();
+
+      ExprNodeColumnDesc columnDesc;
+      ExprNodeConstantDesc constantDesc;
+      boolean upperBound;
+      String boundValue = null;
+      if (genFunc.getChildren().get(0) instanceof ExprNodeColumnDesc &&
+              genFunc.getChildren().get(1) instanceof ExprNodeConstantDesc) {
+        columnDesc = (ExprNodeColumnDesc) genFunc.getChildren().get(0);
+        constantDesc = (ExprNodeConstantDesc) genFunc.getChildren().get(1);
+        // Comparison to null will always return false
+        if (constantDesc.getValue() == null) {
+          return 0;
+        }
+        if (udf instanceof GenericUDFOPEqualOrGreaterThan ||
+                udf instanceof GenericUDFOPGreaterThan) {
+          boundValue = constantDesc.getValue().toString();
+          upperBound = false;
+        } else {
+          boundValue = constantDesc.getValue().toString();
+          upperBound = true;
+        }
+      } else if (genFunc.getChildren().get(1) instanceof ExprNodeColumnDesc &&
+              genFunc.getChildren().get(0) instanceof ExprNodeConstantDesc) {
+        columnDesc = (ExprNodeColumnDesc) genFunc.getChildren().get(1);
+        constantDesc = (ExprNodeConstantDesc) genFunc.getChildren().get(0);
+        // Comparison to null will always return false
+        if (constantDesc.getValue() == null) {
+          return 0;
+        }
+        if (udf instanceof GenericUDFOPEqualOrGreaterThan ||
+                udf instanceof GenericUDFOPGreaterThan) {
+          boundValue = constantDesc.getValue().toString();
+          upperBound = true;
+        } else {
+          boundValue = constantDesc.getValue().toString();
+          upperBound = false;
+        }
+      } else {
+        // default
+        return numRows / 3;
+      }
+
+      ColStatistics cs = stats.getColumnStatisticsFromColName(columnDesc.getColumn());
+      if (cs != null && cs.getRange() != null &&
+              cs.getRange().maxValue != null && cs.getRange().minValue != null) {
+        String colTypeLowerCase = columnDesc.getTypeString().toLowerCase();
+        try {
+          if (colTypeLowerCase.equals(serdeConstants.TINYINT_TYPE_NAME)) {
+            byte value = new Byte(boundValue);
+            byte maxValue = cs.getRange().maxValue.byteValue();
+            byte minValue = cs.getRange().minValue.byteValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          } else if (colTypeLowerCase.equals(serdeConstants.SMALLINT_TYPE_NAME)) {
+            short value = new Short(boundValue);
+            short maxValue = cs.getRange().maxValue.shortValue();
+            short minValue = cs.getRange().minValue.shortValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          } else if (colTypeLowerCase.equals(serdeConstants.INT_TYPE_NAME) ||
+                  colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
+            // Date is an integer internally
+            int value = new Integer(boundValue);
+            int maxValue = cs.getRange().maxValue.intValue();
+            int minValue = cs.getRange().minValue.intValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          } else if (colTypeLowerCase.equals(serdeConstants.BIGINT_TYPE_NAME)) {
+            long value = new Long(boundValue);
+            long maxValue = cs.getRange().maxValue.longValue();
+            long minValue = cs.getRange().minValue.longValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          } else if (colTypeLowerCase.equals(serdeConstants.FLOAT_TYPE_NAME)) {
+            float value = new Float(boundValue);
+            float maxValue = cs.getRange().maxValue.floatValue();
+            float minValue = cs.getRange().minValue.floatValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          } else if (colTypeLowerCase.equals(serdeConstants.DOUBLE_TYPE_NAME)) {
+            double value = new Double(boundValue);
+            double maxValue = cs.getRange().maxValue.doubleValue();
+            double minValue = cs.getRange().minValue.doubleValue();
+            if (upperBound) {
+              if (maxValue < value) {
+                return numRows;
+              }
+              if (minValue > value) {
+                return 0;
+              }
+            } else {
+              if (minValue > value) {
+                return numRows;
+              }
+              if (maxValue < value) {
+                return 0;
+              }
+            }
+          }
+        } catch (NumberFormatException nfe) {
+          return numRows / 3;
+        }
+      }
+      // default
+      return numRows / 3;
+    }
+
     private long evaluateChildExpr(Statistics stats, ExprNodeDesc child,
         AnnotateStatsProcCtx aspCtx, List<String> neededCols,
         FilterOperator fop, long evaluatedRowCount) throws CloneNotSupportedException {
@@ -578,9 +753,10 @@ public class StatsRulesProcFactory {
         } else if (udf instanceof GenericUDFOPNotEqual) {
           return numRows;
         } else if (udf instanceof GenericUDFOPEqualOrGreaterThan
-            || udf instanceof GenericUDFOPEqualOrLessThan || udf instanceof GenericUDFOPGreaterThan
+            || udf instanceof GenericUDFOPEqualOrLessThan
+            || udf instanceof GenericUDFOPGreaterThan
             || udf instanceof GenericUDFOPLessThan) {
-          return numRows / 3;
+          return evaluateComparator(stats, genFunc);
         } else if (udf instanceof GenericUDFOPNotNull) {
             return evaluateNotNullExpr(stats, genFunc);
         } else if (udf instanceof GenericUDFOPNull) {
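
The per-type branches in evaluateComparator all implement one rule: when the
constant bound falls outside the column's [min, max] range the predicate is
trivially all-or-nothing, and only a bound inside the range falls back to the
old rows/3 heuristic. A type-generic sketch of that rule (illustrative only;
the committed code repeats it per serde type and keeps the exact primitive
widths):

  // upperBound == true corresponds to predicates like col <= value
  // (or value >= col); false to col >= value.
  static long estimateRows(long numRows, double min, double max,
                           double value, boolean upperBound) {
    if (upperBound) {
      if (max < value) return numRows;  // entire range satisfies col <= value
      if (min > value) return 0;        // no row can satisfy it
    } else {
      if (min > value) return numRows;  // entire range satisfies col >= value
      if (max < value) return 0;        // no row can satisfy it
    }
    return numRows / 3;                 // bound inside range: old heuristic
  }

This is what the annotate_stats_filter.q.out changes below reflect: with every
locid below 30, "locid < 30" is now estimated at all 8 rows and "locid > 30"
at 1 row (zero converted to one), while bounds inside the range such as
locid < 3 keep the rows/3 estimate of 2.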

http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
index 9d139ba..d8acf94 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
@@ -724,6 +724,8 @@ public class StatsUtils {
       }
     } else if (colTypeLowerCase.equals(serdeConstants.DATE_TYPE_NAME)) {
       cs.setAvgColLen(JavaDataModel.get().lengthOfDate());
+      cs.setRange(csd.getDateStats().getLowValue().getDaysSinceEpoch(),
+              csd.getDateStats().getHighValue().getDaysSinceEpoch());
     } else {
       // Columns statistics for complex datatypes are not supported yet
       return null;
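
With this change a DATE column carries its range as days since the epoch,
which is exactly the representation the new int branch in evaluateComparator
consumes ("Date is an integer internally"). A one-line illustration of the
encoding, using java.time purely for the example:

  // Hypothetical illustration: 2016-03-15 as the stats code sees a
  // DATE bound, i.e. days since 1970-01-01.
  int days = (int) java.time.LocalDate.parse("2016-03-15").toEpochDay();  // 16875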

http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/test/queries/clientpositive/annotate_stats_filter.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_filter.q b/ql/src/test/queries/clientpositive/annotate_stats_filter.q
index 436c053..a352a77 100644
--- a/ql/src/test/queries/clientpositive/annotate_stats_filter.q
+++ b/ql/src/test/queries/clientpositive/annotate_stats_filter.q
@@ -83,9 +83,17 @@ explain select * from loc_orc where (year=2001 and year is null) or (state='CA')
 -- numRows: 1 rawDataSize: 102
 explain select * from loc_orc where (year=2001 or year is null) and (state='CA');
 
--- all inequality conditions rows/3 is the rules
--- numRows: 2 rawDataSize: 204
+-- inequality conditions falling out of range. total or zero (converted to one)
+-- numRows: 1 rawDataSize: 102
+-- numRows: 8 rawDataSize: 804
 explain select * from loc_orc where locid < 30;
 explain select * from loc_orc where locid > 30;
 explain select * from loc_orc where locid <= 30;
 explain select * from loc_orc where locid >= 30;
+
+-- all inequality conditions falling within range. rows/3 is the rules
+-- numRows: 2 rawDataSize: 204
+explain select * from loc_orc where locid < 3;
+explain select * from loc_orc where locid > 3;
+explain select * from loc_orc where locid <= 3;
+explain select * from loc_orc where locid >= 3;

http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
index b09ad03..7e697f1 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out
@@ -856,12 +856,14 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-PREHOOK: query: -- all inequality conditions rows/3 is the rules
--- numRows: 2 rawDataSize: 204
+PREHOOK: query: -- inequality conditions falling out of range. total or zero (converted to one)
+-- numRows: 1 rawDataSize: 102
+-- numRows: 8 rawDataSize: 804
 explain select * from loc_orc where locid < 30
 PREHOOK: type: QUERY
-POSTHOOK: query: -- all inequality conditions rows/3 is the rules
--- numRows: 2 rawDataSize: 204
+POSTHOOK: query: -- inequality conditions falling out of range. total or zero (converted to one)
+-- numRows: 1 rawDataSize: 102
+-- numRows: 8 rawDataSize: 804
 explain select * from loc_orc where locid < 30
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -877,14 +879,14 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid < 30) (type: boolean)
-              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -913,14 +915,14 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid > 30) (type: boolean)
-              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -949,14 +951,14 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid <= 30) (type: boolean)
-              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
                   table:
                       input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -985,6 +987,154 @@ STAGE PLANS:
             Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (locid >= 30) (type: boolean)
+              Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 102 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: -- all inequality conditions falling within range. rows/3 is the rules
+-- numRows: 2 rawDataSize: 204
+explain select * from loc_orc where locid < 3
+PREHOOK: type: QUERY
+POSTHOOK: query: -- all inequality conditions falling within range. rows/3 is the rules
+-- numRows: 2 rawDataSize: 204
+explain select * from loc_orc where locid < 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (locid < 3) (type: boolean)
+              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select * from loc_orc where locid > 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from loc_orc where locid > 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (locid > 3) (type: boolean)
+              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select * from loc_orc where locid <= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from loc_orc where locid <= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (locid <= 3) (type: boolean)
+              Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+              Select Operator
+                expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select * from loc_orc where locid >= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from loc_orc where locid >= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: loc_orc
+            Statistics: Num rows: 8 Data size: 804 Basic stats: COMPLETE Column stats: COMPLETE
+            Filter Operator
+              predicate: (locid >= 3) (type: boolean)
               Statistics: Num rows: 2 Data size: 204 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int)

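The q.out comments above spell out the heuristic these plans exercise: for an
inequality predicate whose constant falls inside the column's [min, max] range,
the annotation layer estimates the filter output at one third of the input rows
(8 rows become 2 here), clamps the estimate at a minimum of one row, and scales
the data size in proportion. A minimal standalone sketch of that arithmetic
(illustrative only, not a quote of Hive's stats annotation code):

  public class FilterStatsSketch {
    // "rows/3" rule for predicates like "locid < 3" whose constant lies
    // within the column's [min, max] range.
    static long estimateInequalityRows(long inputRows) {
      return Math.max(1, inputRows / 3);              // 8 rows -> 2 rows
    }

    // Data size shrinks in proportion to the surviving rows.
    static long scaleDataSize(long inputRows, long inputSize, long outputRows) {
      return (long) Math.ceil((double) inputSize * outputRows / inputRows);
    }

    public static void main(String[] args) {
      long rows = estimateInequalityRows(8);          // 2
      long size = scaleDataSize(8, 804, rows);        // ~201; the plan's 204 comes
      System.out.println(rows + " rows, ~" + size);   // from per-column rounding
    }
  }
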
http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
index ff95252..64a57fe 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out
@@ -342,31 +342,31 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 0) (type: boolean)
-              Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: ss
             Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (ss_store_sk > 0) (type: boolean)
-              Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: ss_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -375,10 +375,10 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 136 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 136 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -472,16 +472,16 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((s_floor_space > 0) and s_store_sk is not null) (type: boolean)
-              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: ss
             Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
@@ -505,10 +505,10 @@ STAGE PLANS:
             0 _col0 (type: int)
             1 _col0 (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 393 Data size: 1572 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 964 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 393 Data size: 1572 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 964 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -688,46 +688,46 @@ STAGE PLANS:
             Statistics: Num rows: 1000 Data size: 3856 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (ss_store_sk > 1000) (type: boolean)
-              Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: ss_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 333 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
             Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 1000) (type: boolean)
-              Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
             Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: (s_store_sk > 1000) (type: boolean)
-              Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -738,14 +738,14 @@ STAGE PLANS:
             1 _col0 (type: int)
             2 _col0 (type: int)
           outputColumnNames: _col1
-          Statistics: Num rows: 213 Data size: 852 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 213 Data size: 852 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 213 Data size: 852 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -789,16 +789,16 @@ STAGE PLANS:
             Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
             Filter Operator
               predicate: ((s_floor_space > 1000) and s_store_sk is not null) (type: boolean)
-              Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
               Select Operator
                 expressions: s_store_sk (type: int)
                 outputColumnNames: _col0
-                Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
-                  Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 12 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
           TableScan
             alias: s
             Statistics: Num rows: 12 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
@@ -824,14 +824,14 @@ STAGE PLANS:
             1 _col0 (type: int)
             2 _col0 (type: int)
           outputColumnNames: _col1
-          Statistics: Num rows: 508 Data size: 2032 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 821 Data size: 3284 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col1 (type: int)
             outputColumnNames: _col0
-            Statistics: Num rows: 508 Data size: 2032 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 821 Data size: 3284 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 508 Data size: 2032 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 821 Data size: 3284 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat

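These hunks trace the PK-FK join heuristic this file is named for: when one
join side's key is a primary key, the join cannot multiply rows, so the output
estimate follows the foreign-key side, scaled by the fraction of the PK side
that survives its local filters. A back-of-the-envelope version of that idea
(simplified; the real estimator also folds in NDVs and column widths):

  public class PkFkJoinEstimate {
    // FK-side rows, scaled by how much of the PK side survives filtering.
    static long estimateJoinRows(long fkRows, long pkRowsAfterFilter, long pkRowsTotal) {
      double pkFraction = (double) pkRowsAfterFilter / pkRowsTotal;
      return Math.max(1, Math.round(fkRows * pkFraction));
    }

    public static void main(String[] args) {
      // store_sales (1000 rows) joined to store (12 rows, all of which now
      // survive "s_store_sk > 0"): the estimate tracks the FK side.
      System.out.println(estimateJoinRows(1000, 12, 12));   // 1000, as in the plan
    }
  }
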
http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/test/results/clientpositive/annotate_stats_part.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
index 186f7af..131cf6a 100644
--- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out
+++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out
@@ -493,11 +493,11 @@ STAGE PLANS:
           Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
-            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: locid (type: int)
               outputColumnNames: _col0
-              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: explain select locid,year from loc_orc where locid>0 and year='2001'
@@ -517,11 +517,11 @@ STAGE PLANS:
           Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
-            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: locid (type: int), '2001' (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 7 Data size: 644 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 
 PREHOOK: query: explain select * from (select locid,year from loc_orc) test where locid>0 and year='2001'
@@ -541,10 +541,10 @@ STAGE PLANS:
           Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
           Filter Operator
             predicate: (locid > 0) (type: boolean)
-            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: locid (type: int), '2001' (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 7 Data size: 644 Basic stats: COMPLETE Column stats: COMPLETE
               ListSink
 

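One detail worth noticing in these hunks: the Data size field is just the row
estimate multiplied by a per-row width derived from column statistics, so it
moves in lockstep with Num rows (7 rows x 4 bytes = 28 for the lone int column,
and 7 x 92 = 644 once the projected '2001' string constant is counted). A toy
illustration of that bookkeeping, with column widths read off the plans above
rather than computed from Hive's JavaDataModel:

  public class DataSizeSketch {
    static long dataSize(long numRows, int... columnWidths) {
      long rowWidth = 0;
      for (int w : columnWidths) {
        rowWidth += w;
      }
      return numRows * rowWidth;
    }

    public static void main(String[] args) {
      System.out.println(dataSize(7, 4));       // 28: locid alone
      System.out.println(dataSize(7, 4, 88));   // 644: locid plus the string constant
    }
  }
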
http://git-wip-us.apache.org/repos/asf/hive/blob/f87b2b63/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index b7a8174..b501f97 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -388,9 +388,9 @@ Stage-0
                   PartitionCols:_col0, _col1
                   Group By Operator [GBY_35] (rows=1 width=20)
                     Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col2, _col6
-                    Select Operator [SEL_34] (rows=2 width=16)
+                    Select Operator [SEL_34] (rows=3 width=16)
                       Output:["_col2","_col6"]
-                      Filter Operator [FIL_33] (rows=2 width=16)
+                      Filter Operator [FIL_33] (rows=3 width=16)
                         predicate:((_col1 > 0) or (_col6 >= 0))
                         Merge Join Operator [MERGEJOIN_52] (rows=3 width=16)
                           Conds:RS_30._col0=RS_31._col0(Inner),Output:["_col1","_col2","_col6"]
@@ -491,14 +491,14 @@ Stage-0
                       Output:["_col2","_col6"]
                       Filter Operator [FIL_30] (rows=1 width=16)
                         predicate:(((_col1 > 0) or (_col6 >= 0)) and ((_col6 >= 1) or (_col2 >= 1)) and ((UDFToLong(_col6) + _col2) >= 0))
-                        Merge Join Operator [MERGEJOIN_48] (rows=2 width=16)
+                        Merge Join Operator [MERGEJOIN_48] (rows=3 width=16)
                           Conds:RS_27._col0=RS_28._col0(Inner),Output:["_col1","_col2","_col6"]
                         <-Map 10 [SIMPLE_EDGE]
                           SHUFFLE [RS_28]
                             PartitionCols:_col0
-                            Select Operator [SEL_26] (rows=5 width=71)
+                            Select Operator [SEL_26] (rows=18 width=79)
                               Output:["_col0","_col1"]
-                              Filter Operator [FIL_46] (rows=5 width=71)
+                              Filter Operator [FIL_46] (rows=18 width=79)
                                 predicate:((c_int > 0) and key is not null)
                                 TableScan [TS_24] (rows=20 width=80)
                                   default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
@@ -664,14 +664,14 @@ Stage-0
                       Output:["_col2","_col6"]
                       Filter Operator [FIL_29] (rows=1 width=20)
                         predicate:(((_col1 + _col4) >= 0) and ((_col1 > 0) or (_col6 >= 0)) and ((_col6 >= 1) or (_col2 >= 1)) and ((UDFToLong(_col6) + _col2) >= 0))
-                        Merge Join Operator [MERGEJOIN_42] (rows=3 width=20)
+                        Merge Join Operator [MERGEJOIN_42] (rows=4 width=20)
                           Conds:RS_25._col0=RS_26._col0(Outer),RS_25._col0=RS_27._col0(Right Outer),Output:["_col1","_col2","_col4","_col6"]
                         <-Map 10 [SIMPLE_EDGE]
                           SHUFFLE [RS_27]
                             PartitionCols:_col0
-                            Select Operator [SEL_24] (rows=6 width=74)
+                            Select Operator [SEL_24] (rows=20 width=80)
                               Output:["_col0","_col1"]
-                              Filter Operator [FIL_41] (rows=6 width=74)
+                              Filter Operator [FIL_41] (rows=20 width=80)
                                 predicate:(c_int > 0)
                                 TableScan [TS_22] (rows=20 width=80)
                                   default@cbo_t3,cbo_t3,Tbl:COMPLETE,Col:COMPLETE,Output:["key","c_int"]
@@ -744,9 +744,9 @@ Stage-0
               PartitionCols:_col0, _col1
               Group By Operator [GBY_29] (rows=1 width=20)
                 Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col2, _col6
-                Select Operator [SEL_28] (rows=2 width=16)
+                Select Operator [SEL_28] (rows=3 width=16)
                   Output:["_col2","_col6"]
-                  Filter Operator [FIL_27] (rows=2 width=16)
+                  Filter Operator [FIL_27] (rows=3 width=16)
                     predicate:((_col1 > 0) or (_col6 >= 0))
                     Merge Join Operator [MERGEJOIN_43] (rows=3 width=16)
                       Conds:RS_24._col0=RS_25._col0(Inner),Output:["_col1","_col2","_col6"]
@@ -1201,9 +1201,9 @@ Stage-0
     Stage-1
       Reducer 3
       File Output Operator [FS_19]
-        Select Operator [SEL_18] (rows=14 width=101)
+        Select Operator [SEL_18] (rows=21 width=101)
           Output:["_col0","_col1","_col2","_col3","_col4"]
-          Filter Operator [FIL_17] (rows=14 width=101)
+          Filter Operator [FIL_17] (rows=21 width=101)
             predicate:((_col1 > 0) or (_col6 >= 0))
             Merge Join Operator [MERGEJOIN_28] (rows=21 width=101)
               Conds:RS_14._col0=RS_15._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col6"]
@@ -1257,9 +1257,9 @@ Stage-0
     Stage-1
       Reducer 2
       File Output Operator [FS_14]
-        Select Operator [SEL_13] (rows=12 width=101)
+        Select Operator [SEL_13] (rows=24 width=101)
           Output:["_col0","_col1","_col2","_col3","_col4"]
-          Filter Operator [FIL_12] (rows=12 width=101)
+          Filter Operator [FIL_12] (rows=24 width=101)
             predicate:(((_col1 + _col4) = 2) and ((_col1 > 0) or (_col6 >= 0)) and ((_col4 + 1) = 2))
             Merge Join Operator [MERGEJOIN_19] (rows=72 width=101)
               Conds:RS_8._col0=RS_9._col0(Right Outer),RS_8._col0=RS_10._col0(Right Outer),Output:["_col1","_col2","_col3","_col4","_col6"]
@@ -1487,9 +1487,9 @@ Stage-0
                     PartitionCols:_col0, _col1
                     Group By Operator [GBY_41] (rows=1 width=20)
                       Output:["_col0","_col1","_col2"],aggregations:["count()"],keys:_col2, _col6
-                      Select Operator [SEL_40] (rows=2 width=16)
+                      Select Operator [SEL_40] (rows=3 width=16)
                         Output:["_col2","_col6"]
-                        Filter Operator [FIL_39] (rows=2 width=16)
+                        Filter Operator [FIL_39] (rows=3 width=16)
                           predicate:((_col1 > 0) or (_col6 >= 0))
                           Merge Join Operator [MERGEJOIN_61] (rows=3 width=16)
                             Conds:RS_36._col0=RS_37._col0(Inner),Output:["_col1","_col2","_col6"]


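The FIL_33 / FIL_27 / FIL_39 operators in this file all carry the same
disjunctive predicate, and the revised row counts are consistent with the usual
independence formula for OR, sel(A or B) = sel(A) + sel(B) - sel(A) * sel(B),
capped at the input row count. A quick sketch of that combination (the textbook
formula, not a quote of Hive's implementation):

  public class OrSelectivity {
    // Selectivity of (A or B), assuming A and B are independent.
    static double or(double selA, double selB) {
      return selA + selB - selA * selB;
    }

    public static void main(String[] args) {
      // If one comparison is estimated to keep every row, the disjunction
      // keeps everything: a 3-row join input stays at 3 rows, as in the
      // rows=3 filters above.
      System.out.println(Math.round(3 * or(1.0, 1.0 / 3)));   // 3
    }
  }
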
[29/51] [abbrv] hive git commit: HIVE-13206: Create a test-sources.jar when -Psources profile is invoked (Sergio Pena, reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-13206: Create a test-sources.jar when -Psources profile is invoked (Sergio Pena, reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ceff0626
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ceff0626
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ceff0626

Branch: refs/heads/llap
Commit: ceff0626dc1951539f31f3c01fd739a32d92fcad
Parents: b6502b5
Author: Sergio Pena <se...@cloudera.com>
Authored: Fri Mar 11 09:55:18 2016 -0600
Committer: Sergio Pena <se...@cloudera.com>
Committed: Fri Mar 11 09:56:23 2016 -0600

----------------------------------------------------------------------
 beeline/pom.xml      | 22 ++++++++++++++++++++++
 common/pom.xml       | 19 +++++++++++++++++++
 ql/pom.xml           | 22 ++++++++++++++++++++++
 serde/pom.xml        | 22 ++++++++++++++++++++++
 spark-client/pom.xml | 22 ++++++++++++++++++++++
 5 files changed, 107 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ceff0626/beeline/pom.xml
----------------------------------------------------------------------
diff --git a/beeline/pom.xml b/beeline/pom.xml
index c1d89b8..8ac83f5 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -126,6 +126,28 @@
     </dependency>
   </dependencies>
 
+  <profiles>
+    <profile>
+      <id>sources</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>attach-sources</id>
+                <goals>
+                  <goal>test-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
   <build>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>

http://git-wip-us.apache.org/repos/asf/hive/blob/ceff0626/common/pom.xml
----------------------------------------------------------------------
diff --git a/common/pom.xml b/common/pom.xml
index cc47257..1b9b4bc 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -192,6 +192,25 @@
 
   <profiles>
     <profile>
+      <id>sources</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>attach-sources</id>
+                <goals>
+                  <goal>test-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
       <id>dist</id>
       <build>
         <plugins>

http://git-wip-us.apache.org/repos/asf/hive/blob/ceff0626/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 330e449..ebb9599 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -683,6 +683,28 @@
     </dependency>
   </dependencies>
 
+  <profiles>
+    <profile>
+      <id>sources</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>attach-sources</id>
+                <goals>
+                  <goal>test-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
   <build>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>

http://git-wip-us.apache.org/repos/asf/hive/blob/ceff0626/serde/pom.xml
----------------------------------------------------------------------
diff --git a/serde/pom.xml b/serde/pom.xml
index e025a4e..cea7fce 100644
--- a/serde/pom.xml
+++ b/serde/pom.xml
@@ -160,6 +160,28 @@
     </dependency>
   </dependencies>
 
+  <profiles>
+    <profile>
+      <id>sources</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>attach-sources</id>
+                <goals>
+                  <goal>test-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
   <build>
     <sourceDirectory>${basedir}/src/java</sourceDirectory>
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>

http://git-wip-us.apache.org/repos/asf/hive/blob/ceff0626/spark-client/pom.xml
----------------------------------------------------------------------
diff --git a/spark-client/pom.xml b/spark-client/pom.xml
index 7cca609..b677195 100644
--- a/spark-client/pom.xml
+++ b/spark-client/pom.xml
@@ -102,6 +102,28 @@
     </dependency>
   </dependencies>
 
+  <profiles>
+    <profile>
+      <id>sources</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>attach-sources</id>
+                <goals>
+                  <goal>test-jar</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
   <build>
     <plugins>
       <plugin>

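Each of the five poms above gains the same profile, so the behavior is uniform
across beeline, common, ql, serde and spark-client: with the profile active,
the maven-source-plugin's test-jar goal runs during the build and a
*-test-sources.jar is attached alongside each module's regular artifacts.
Activating it is a matter of passing the profile on the command line, e.g.

  mvn clean install -Psources
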

[22/51] [abbrv] hive git commit: HIVE-12039 : Temporarily disable TestSSL#testSSLVersion (Ashutosh Chauhan via Thejas Nair)

Posted by jd...@apache.org.
HIVE-12039 : Temporarily disable TestSSL#testSSLVersion (Ashutosh Chauhan via Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0da77af0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0da77af0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0da77af0

Branch: refs/heads/llap
Commit: 0da77af0eead39de1630b821af65d03ecf23bbcc
Parents: 9e231f2
Author: Ashutosh Chauhan <ha...@apache.org>
Authored: Wed Mar 9 16:25:35 2016 -0800
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Wed Mar 9 16:25:35 2016 -0800

----------------------------------------------------------------------
 itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0da77af0/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
index b66ffda..ea9acba 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestSSL.java
@@ -40,6 +40,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,6 +111,7 @@ public class TestSSL {
    * Tests to ensure SSLv2 and SSLv3 are disabled
    */
   @Test
+  @Ignore("Temporarily disable until fixed")
   public void testSSLVersion() throws Exception {
     // we need openssl
     Assume.assumeTrue(execCommand("which openssl") == 0);

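Since the method keeps its @Test annotation, JUnit 4 still compiles it and
reports it as skipped with the given reason rather than silently dropping it,
which is the usual motive for @Ignore over commenting the test out. The pattern
in isolation (a minimal sketch, not part of the patch):

  import org.junit.Ignore;
  import org.junit.Test;

  public class ExampleTest {
    @Test
    @Ignore("Temporarily disable until fixed")   // reported as skipped, never run
    public void testSomething() {
      // body left intact so it keeps compiling against current APIs
    }
  }
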

[03/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index a546247..05a0749 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -956,6 +956,48 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function cancel_delegation_token($token_str_form);
   /**
+   * @param string $token_identifier
+   * @param string $delegation_token
+   * @return bool
+   */
+  public function add_token($token_identifier, $delegation_token);
+  /**
+   * @param string $token_identifier
+   * @return bool
+   */
+  public function remove_token($token_identifier);
+  /**
+   * @param string $token_identifier
+   * @return string
+   */
+  public function get_token($token_identifier);
+  /**
+   * @return string[]
+   */
+  public function get_all_token_identifiers();
+  /**
+   * @param string $key
+   * @return int
+   * @throws \metastore\MetaException
+   */
+  public function add_master_key($key);
+  /**
+   * @param int $seq_number
+   * @param string $key
+   * @throws \metastore\NoSuchObjectException
+   * @throws \metastore\MetaException
+   */
+  public function update_master_key($seq_number, $key);
+  /**
+   * @param int $key_seq
+   * @return bool
+   */
+  public function remove_master_key($key_seq);
+  /**
+   * @return string[]
+   */
+  public function get_master_keys();
+  /**
    * @return \metastore\GetOpenTxnsResponse
    */
   public function get_open_txns();
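
The eight methods declared above expose the token and master-key operations that
back HS2 delegation tokens, so a remote client can drive the metastore-backed
token store over Thrift directly. A minimal smoke-test sketch using the Java
bindings generated from the same IDL (host, port and the token values are
placeholders, and a real secured metastore would need a SASL transport rather
than this bare socket):

  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;

  public class TokenStoreSmokeTest {
    public static void main(String[] args) throws Exception {
      TSocket transport = new TSocket("localhost", 9083);   // assumed test setup
      transport.open();
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

      // The same calls the PHP interface above declares.
      client.add_token("token-id-1", "base64-encoded-token");   // placeholder values
      String stored = client.get_token("token-id-1");
      System.out.println("stored token: " + stored);
      client.remove_token("token-id-1");

      transport.close();
    }
  }
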
@@ -7561,6 +7603,420 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     return;
   }
 
+  public function add_token($token_identifier, $delegation_token)
+  {
+    $this->send_add_token($token_identifier, $delegation_token);
+    return $this->recv_add_token();
+  }
+
+  public function send_add_token($token_identifier, $delegation_token)
+  {
+    $args = new \metastore\ThriftHiveMetastore_add_token_args();
+    $args->token_identifier = $token_identifier;
+    $args->delegation_token = $delegation_token;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'add_token', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('add_token', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_add_token()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_token_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_add_token_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("add_token failed: unknown result");
+  }
+
+  public function remove_token($token_identifier)
+  {
+    $this->send_remove_token($token_identifier);
+    return $this->recv_remove_token();
+  }
+
+  public function send_remove_token($token_identifier)
+  {
+    $args = new \metastore\ThriftHiveMetastore_remove_token_args();
+    $args->token_identifier = $token_identifier;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'remove_token', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('remove_token', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_remove_token()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_remove_token_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_remove_token_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("remove_token failed: unknown result");
+  }
+
+  public function get_token($token_identifier)
+  {
+    $this->send_get_token($token_identifier);
+    return $this->recv_get_token();
+  }
+
+  public function send_get_token($token_identifier)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_token_args();
+    $args->token_identifier = $token_identifier;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_token', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_token', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_token()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_token_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_token_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("get_token failed: unknown result");
+  }
+
+  public function get_all_token_identifiers()
+  {
+    $this->send_get_all_token_identifiers();
+    return $this->recv_get_all_token_identifiers();
+  }
+
+  public function send_get_all_token_identifiers()
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_all_token_identifiers_args();
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_all_token_identifiers', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_all_token_identifiers', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_all_token_identifiers()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_all_token_identifiers_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_all_token_identifiers_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("get_all_token_identifiers failed: unknown result");
+  }
+
+  public function add_master_key($key)
+  {
+    $this->send_add_master_key($key);
+    return $this->recv_add_master_key();
+  }
+
+  public function send_add_master_key($key)
+  {
+    $args = new \metastore\ThriftHiveMetastore_add_master_key_args();
+    $args->key = $key;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'add_master_key', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('add_master_key', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_add_master_key()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_add_master_key_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_add_master_key_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    throw new \Exception("add_master_key failed: unknown result");
+  }
+
+  public function update_master_key($seq_number, $key)
+  {
+    $this->send_update_master_key($seq_number, $key);
+    $this->recv_update_master_key();
+  }
+
+  public function send_update_master_key($seq_number, $key)
+  {
+    $args = new \metastore\ThriftHiveMetastore_update_master_key_args();
+    $args->seq_number = $seq_number;
+    $args->key = $key;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'update_master_key', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('update_master_key', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_update_master_key()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_update_master_key_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_update_master_key_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->o1 !== null) {
+      throw $result->o1;
+    }
+    if ($result->o2 !== null) {
+      throw $result->o2;
+    }
+    return;
+  }
+
+  public function remove_master_key($key_seq)
+  {
+    $this->send_remove_master_key($key_seq);
+    return $this->recv_remove_master_key();
+  }
+
+  public function send_remove_master_key($key_seq)
+  {
+    $args = new \metastore\ThriftHiveMetastore_remove_master_key_args();
+    $args->key_seq = $key_seq;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'remove_master_key', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('remove_master_key', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_remove_master_key()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_remove_master_key_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_remove_master_key_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("remove_master_key failed: unknown result");
+  }
+
+  public function get_master_keys()
+  {
+    $this->send_get_master_keys();
+    return $this->recv_get_master_keys();
+  }
+
+  public function send_get_master_keys()
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_master_keys_args();
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_master_keys', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_master_keys', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_master_keys()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_master_keys_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_master_keys_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("get_master_keys failed: unknown result");
+  }
+
   public function get_open_txns()
   {
     $this->send_get_open_txns();
@@ -36689,6 +37145,1306 @@ class ThriftHiveMetastore_cancel_delegation_token_result {
 
 }
 
+class ThriftHiveMetastore_add_token_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $token_identifier = null;
+  /**
+   * @var string
+   */
+  public $delegation_token = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'token_identifier',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'delegation_token',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['token_identifier'])) {
+        $this->token_identifier = $vals['token_identifier'];
+      }
+      if (isset($vals['delegation_token'])) {
+        $this->delegation_token = $vals['delegation_token'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_token_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->token_identifier);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->delegation_token);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_token_args');
+    if ($this->token_identifier !== null) {
+      $xfer += $output->writeFieldBegin('token_identifier', TType::STRING, 1);
+      $xfer += $output->writeString($this->token_identifier);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->delegation_token !== null) {
+      $xfer += $output->writeFieldBegin('delegation_token', TType::STRING, 2);
+      $xfer += $output->writeString($this->delegation_token);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_add_token_result {
+  static $_TSPEC;
+
+  /**
+   * @var bool
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::BOOL,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_token_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_token_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::BOOL, 0);
+      $xfer += $output->writeBool($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_remove_token_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $token_identifier = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'token_identifier',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['token_identifier'])) {
+        $this->token_identifier = $vals['token_identifier'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_remove_token_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->token_identifier);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_remove_token_args');
+    if ($this->token_identifier !== null) {
+      $xfer += $output->writeFieldBegin('token_identifier', TType::STRING, 1);
+      $xfer += $output->writeString($this->token_identifier);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_remove_token_result {
+  static $_TSPEC;
+
+  /**
+   * @var bool
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::BOOL,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_remove_token_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_remove_token_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::BOOL, 0);
+      $xfer += $output->writeBool($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_token_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $token_identifier = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'token_identifier',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['token_identifier'])) {
+        $this->token_identifier = $vals['token_identifier'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_token_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->token_identifier);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_token_args');
+    if ($this->token_identifier !== null) {
+      $xfer += $output->writeFieldBegin('token_identifier', TType::STRING, 1);
+      $xfer += $output->writeString($this->token_identifier);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_token_result {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_token_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_token_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::STRING, 0);
+      $xfer += $output->writeString($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_all_token_identifiers_args {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_all_token_identifiers_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_token_identifiers_args');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_all_token_identifiers_result {
+  static $_TSPEC;
+
+  /**
+   * @var string[]
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_all_token_identifiers_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size950 = 0;
+            $_etype953 = 0;
+            $xfer += $input->readListBegin($_etype953, $_size950);
+            for ($_i954 = 0; $_i954 < $_size950; ++$_i954)
+            {
+              $elem955 = null;
+              $xfer += $input->readString($elem955);
+              $this->success []= $elem955;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_all_token_identifiers_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRING, count($this->success));
+        {
+          foreach ($this->success as $iter956)
+          {
+            $xfer += $output->writeString($iter956);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_add_master_key_args {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $key = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'key',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['key'])) {
+        $this->key = $vals['key'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_master_key_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->key);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_master_key_args');
+    if ($this->key !== null) {
+      $xfer += $output->writeFieldBegin('key', TType::STRING, 1);
+      $xfer += $output->writeString($this->key);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_add_master_key_result {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $success = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o1 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::I32,
+          ),
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_add_master_key_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\MetaException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_master_key_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::I32, 0);
+      $xfer += $output->writeI32($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_update_master_key_args {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $seq_number = null;
+  /**
+   * @var string
+   */
+  public $key = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'seq_number',
+          'type' => TType::I32,
+          ),
+        2 => array(
+          'var' => 'key',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['seq_number'])) {
+        $this->seq_number = $vals['seq_number'];
+      }
+      if (isset($vals['key'])) {
+        $this->key = $vals['key'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_update_master_key_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->seq_number);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->key);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_master_key_args');
+    if ($this->seq_number !== null) {
+      $xfer += $output->writeFieldBegin('seq_number', TType::I32, 1);
+      $xfer += $output->writeI32($this->seq_number);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->key !== null) {
+      $xfer += $output->writeFieldBegin('key', TType::STRING, 2);
+      $xfer += $output->writeString($this->key);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_update_master_key_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\NoSuchObjectException
+   */
+  public $o1 = null;
+  /**
+   * @var \metastore\MetaException
+   */
+  public $o2 = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'o1',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\NoSuchObjectException',
+          ),
+        2 => array(
+          'var' => 'o2',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\MetaException',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['o1'])) {
+        $this->o1 = $vals['o1'];
+      }
+      if (isset($vals['o2'])) {
+        $this->o2 = $vals['o2'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_update_master_key_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->o1 = new \metastore\NoSuchObjectException();
+            $xfer += $this->o1->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRUCT) {
+            $this->o2 = new \metastore\MetaException();
+            $xfer += $this->o2->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_master_key_result');
+    if ($this->o1 !== null) {
+      $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1);
+      $xfer += $this->o1->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->o2 !== null) {
+      $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2);
+      $xfer += $this->o2->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_remove_master_key_args {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $key_seq = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'key_seq',
+          'type' => TType::I32,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['key_seq'])) {
+        $this->key_seq = $vals['key_seq'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_remove_master_key_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::I32) {
+            $xfer += $input->readI32($this->key_seq);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_remove_master_key_args');
+    if ($this->key_seq !== null) {
+      $xfer += $output->writeFieldBegin('key_seq', TType::I32, 1);
+      $xfer += $output->writeI32($this->key_seq);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_remove_master_key_result {
+  static $_TSPEC;
+
+  /**
+   * @var bool
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::BOOL,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_remove_master_key_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->success);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_remove_master_key_result');
+    if ($this->success !== null) {
+      $xfer += $output->writeFieldBegin('success', TType::BOOL, 0);
+      $xfer += $output->writeBool($this->success);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_master_keys_args {
+  static $_TSPEC;
+
+
+  public function __construct() {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        );
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_master_keys_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_master_keys_args');
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_master_keys_result {
+  static $_TSPEC;
+
+  /**
+   * @var string[]
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::LST,
+          'etype' => TType::STRING,
+          'elem' => array(
+            'type' => TType::STRING,
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_master_keys_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::LST) {
+            $this->success = array();
+            $_size957 = 0;
+            $_etype960 = 0;
+            $xfer += $input->readListBegin($_etype960, $_size957);
+            for ($_i961 = 0; $_i961 < $_size957; ++$_i961)
+            {
+              $elem962 = null;
+              $xfer += $input->readString($elem962);
+              $this->success []= $elem962;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_master_keys_result');
+    if ($this->success !== null) {
+      if (!is_array($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::LST, 0);
+      {
+        $output->writeListBegin(TType::STRING, count($this->success));
+        {
+          foreach ($this->success as $iter963)
+          {
+            $xfer += $output->writeString($iter963);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class ThriftHiveMetastore_get_open_txns_args {
   static $_TSPEC;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 9a53d56..516b926 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -135,6 +135,14 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)')
   print('  i64 renew_delegation_token(string token_str_form)')
   print('  void cancel_delegation_token(string token_str_form)')
+  print('  bool add_token(string token_identifier, string delegation_token)')
+  print('  bool remove_token(string token_identifier)')
+  print('  string get_token(string token_identifier)')
+  print('   get_all_token_identifiers()')
+  print('  i32 add_master_key(string key)')
+  print('  void update_master_key(i32 seq_number, string key)')
+  print('  bool remove_master_key(i32 key_seq)')
+  print('   get_master_keys()')
   print('  GetOpenTxnsResponse get_open_txns()')
   print('  GetOpenTxnsInfoResponse get_open_txns_info()')
   print('  OpenTxnsResponse open_txns(OpenTxnRequest rqst)')
@@ -894,6 +902,54 @@ elif cmd == 'cancel_delegation_token':
     sys.exit(1)
   pp.pprint(client.cancel_delegation_token(args[0],))
 
+elif cmd == 'add_token':
+  if len(args) != 2:
+    print('add_token requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.add_token(args[0],args[1],))
+
+elif cmd == 'remove_token':
+  if len(args) != 1:
+    print('remove_token requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.remove_token(args[0],))
+
+elif cmd == 'get_token':
+  if len(args) != 1:
+    print('get_token requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_token(args[0],))
+
+elif cmd == 'get_all_token_identifiers':
+  if len(args) != 0:
+    print('get_all_token_identifiers requires 0 args')
+    sys.exit(1)
+  pp.pprint(client.get_all_token_identifiers())
+
+elif cmd == 'add_master_key':
+  if len(args) != 1:
+    print('add_master_key requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.add_master_key(args[0],))
+
+elif cmd == 'update_master_key':
+  if len(args) != 2:
+    print('update_master_key requires 2 args')
+    sys.exit(1)
+  pp.pprint(client.update_master_key(eval(args[0]),args[1],))
+
+elif cmd == 'remove_master_key':
+  if len(args) != 1:
+    print('remove_master_key requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.remove_master_key(eval(args[0]),))
+
+elif cmd == 'get_master_keys':
+  if len(args) != 0:
+    print('get_master_keys requires 0 args')
+    sys.exit(1)
+  pp.pprint(client.get_master_keys())
+
 elif cmd == 'get_open_txns':
   if len(args) != 0:
     print('get_open_txns requires 0 args')
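
For orientation, here is a minimal sketch of driving the new token RPCs end to end through the regenerated Java client. The metastore host/port, transport setup, and sample token values are assumptions for illustration, not part of this commit; the method names come straight from the Thrift definitions added above.

    // Hypothetical smoke test; endpoint and token values are assumptions.
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class TokenRpcSketch {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083); // assumed endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

        // The four token RPCs added above, exercised in order.
        boolean added = client.add_token("token-id-1", "base64-token-payload");
        String stored = client.get_token("token-id-1");
        List<String> ids = client.get_all_token_identifiers();
        boolean removed = client.remove_token("token-id-1");
        System.out.println(added + " " + stored + " " + ids + " " + removed);

        transport.close();
      }
    }

The same calls are what the regenerated ThriftHiveMetastore-remote script above dispatches from the command line.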


[08/51] [abbrv] hive git commit: HIVE-13153: SessionID is appended to thread name twice (Prasanth Jayachandran reviewed by Vikram Dixit, Sergey Shelukhin)

Posted by jd...@apache.org.
HIVE-13153: SessionID is appended to thread name twice (Prasanth Jayachandran reviewed by Vikram Dixit, Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe301214
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe301214
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe301214

Branch: refs/heads/llap
Commit: fe3012145643679c738232e6b0e7382ab810cdf6
Parents: 87131d0
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Tue Mar 8 15:26:29 2016 -0600
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Tue Mar 8 15:26:29 2016 -0600

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/cli/CliDriver.java   |  7 ++++---
 .../hadoop/hive/ql/session/SessionState.java    | 22 ++++++++++++++++++++
 .../service/cli/session/HiveSessionImpl.java    | 17 ++-------------
 3 files changed, 28 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fe301214/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
----------------------------------------------------------------------
diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
index e77b7f1..b6fe77c 100644
--- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
+++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java
@@ -118,8 +118,8 @@ public class CliDriver {
     CliSessionState ss = (CliSessionState) SessionState.get();
     ss.setLastCommand(cmd);
 
-    String callerInfo = ss.getConf().getLogIdVar(ss.getSessionId());
-    Thread.currentThread().setName(callerInfo + " " + originalThreadName);
+    ss.updateThreadName();
+
     // Flush the print stream, so it doesn't include output from the last command
     ss.err.flush();
     String cmd_trimmed = cmd.trim();
@@ -711,7 +711,8 @@ public class CliDriver {
       SessionState.start(ss);
     }
 
-    Thread.currentThread().setName(conf.getLogIdVar(ss.getSessionId()) + " " + originalThreadName);
+    ss.updateThreadName();
+
     // execute cli driver work
     try {
       return executeDriver(ss, conf, oproc);

http://git-wip-us.apache.org/repos/asf/hive/blob/fe301214/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 70b2bc0..109cd8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -399,6 +399,28 @@ public class SessionState {
     return (conf.getVar(HiveConf.ConfVars.HIVESESSIONID));
   }
 
+  public void updateThreadName() {
+    final String sessionId = getSessionId();
+    final String logPrefix = getConf().getLogIdVar(sessionId);
+    final String currThreadName = Thread.currentThread().getName();
+    if (!currThreadName.contains(logPrefix)) {
+      final String newThreadName = logPrefix + " " + currThreadName;
+      LOG.info("Updating thread name to {}", newThreadName);
+      Thread.currentThread().setName(newThreadName);
+    }
+  }
+
+  public void resetThreadName() {
+    final String sessionId = getSessionId();
+    final String logPrefix = getConf().getLogIdVar(sessionId);
+    final String currThreadName = Thread.currentThread().getName();
+    if (currThreadName.contains(logPrefix)) {
+      final String[] names = currThreadName.split(logPrefix);
+      LOG.info("Resetting thread name to {}", names[names.length - 1]);
+      Thread.currentThread().setName(names[names.length - 1]);
+    }
+  }
+
   /**
    * Initialize the transaction manager.  This is done lazily to avoid hard wiring one
    * transaction manager at the beginning of the session.

http://git-wip-us.apache.org/repos/asf/hive/blob/fe301214/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 2e45e2d..8baecdf 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -330,10 +330,7 @@ public class HiveSessionImpl implements HiveSession {
       lastAccessTime = System.currentTimeMillis();
     }
     // set the thread name with the logging prefix.
-    String logPrefix = getHiveConf().getLogIdVar(sessionState.getSessionId());
-    LOG.info(
-        "Prefixing the thread name (" + Thread.currentThread().getName() + ") with " + logPrefix);
-    Thread.currentThread().setName(logPrefix + Thread.currentThread().getName());
+    sessionState.updateThreadName();
     Hive.set(sessionHive);
   }
 
@@ -348,17 +345,7 @@ public class HiveSessionImpl implements HiveSession {
     if (sessionState != null) {
       // can be null in-case of junit tests. skip reset.
       // reset thread name at release time.
-      String[] names = Thread.currentThread().getName()
-          .split(getHiveConf().getLogIdVar(sessionState.getSessionId()));
-      String threadName = null;
-      if (names.length > 1) {
-        threadName = names[names.length - 1];
-      } else if (names.length == 1) {
-        threadName = names[0];
-      } else {
-        threadName = "";
-      }
-      Thread.currentThread().setName(threadName);
+      sessionState.resetThreadName();
     }
 
     SessionState.detachSession();
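
The pattern the patch converges on is a symmetric acquire/release pairing: updateThreadName() only prepends the session prefix when it is not already present, which is what stops the double-append. A condensed sketch of a caller under the new helpers; the surrounding session plumbing is simplified for illustration and not literal Hive code:

    // Hypothetical caller shape after this change; session acquisition
    // is simplified. updateThreadName() is idempotent, so re-entrant
    // acquires no longer stack the session prefix twice.
    SessionState ss = SessionState.get();
    ss.updateThreadName();        // prefix "<logPrefix> " once, if absent
    try {
      // ... run the command or operation on this thread ...
    } finally {
      ss.resetThreadName();       // strip the prefix when releasing the thread
    }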


[32/51] [abbrv] hive git commit: HIVE-13263: Vectorization: Unable to vectorize regexp_extract/regexp_replace "Udf: GenericUDFBridge, is not supported" (Matt McCline, reviewed by Gopal Vijayaraghavan)

Posted by jd...@apache.org.
HIVE-13263: Vectorization: Unable to vectorize regexp_extract/regexp_replace "Udf: GenericUDFBridge, is not supported" (Matt McCline, reviewed by Gopal Vijayaraghavan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/114e9f1c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/114e9f1c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/114e9f1c

Branch: refs/heads/llap
Commit: 114e9f1cde5ad562466bedda1ce5db0102e5ff2f
Parents: 428a930
Author: Matt McCline <mm...@hortonworks.com>
Authored: Sun Mar 13 04:21:25 2016 -0700
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Sun Mar 13 04:21:25 2016 -0700

----------------------------------------------------------------------
 .../ql/exec/vector/VectorizationContext.java    |    5 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    4 +
 .../test/queries/clientpositive/vector_udf1.q   |  327 ++++
 .../results/clientpositive/vector_udf1.q.out    | 1640 ++++++++++++++++++
 4 files changed, 1975 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/114e9f1c/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
index dd59bf2..3f95be2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java
@@ -50,7 +50,6 @@ import org.apache.hadoop.hive.ql.exec.vector.ColumnVector.Type;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.InputExpressionType;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor.Mode;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.*;
-import org.apache.hadoop.hive.ql.exec.vector.expressions.CastTimestampToDouble;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFAvgDecimal;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorUDAFAvgTimestamp;
@@ -102,6 +101,8 @@ import org.apache.hadoop.hive.ql.plan.GroupByDesc;
 import org.apache.hadoop.hive.ql.udf.SettableUDF;
 import org.apache.hadoop.hive.ql.udf.UDFConv;
 import org.apache.hadoop.hive.ql.udf.UDFHex;
+import org.apache.hadoop.hive.ql.udf.UDFRegExpExtract;
+import org.apache.hadoop.hive.ql.udf.UDFRegExpReplace;
 import org.apache.hadoop.hive.ql.udf.UDFSign;
 import org.apache.hadoop.hive.ql.udf.UDFToBoolean;
 import org.apache.hadoop.hive.ql.udf.UDFToByte;
@@ -755,6 +756,8 @@ public class VectorizationContext {
       GenericUDFBridge bridge = (GenericUDFBridge) gudf;
       Class<? extends UDF> udfClass = bridge.getUdfClass();
       if (udfClass.equals(UDFHex.class)
+          || udfClass.equals(UDFRegExpExtract.class)
+          || udfClass.equals(UDFRegExpReplace.class)
           || udfClass.equals(UDFConv.class)
           || isCastToIntFamily(udfClass) && isStringFamily(arg0Type(expr))
           || isCastToFloatFamily(udfClass) && isStringFamily(arg0Type(expr))

http://git-wip-us.apache.org/repos/asf/hive/blob/114e9f1c/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 607eb4f..f674ece 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -138,6 +138,8 @@ import org.apache.hadoop.hive.ql.udf.UDFMinute;
 import org.apache.hadoop.hive.ql.udf.UDFMonth;
 import org.apache.hadoop.hive.ql.udf.UDFRadians;
 import org.apache.hadoop.hive.ql.udf.UDFRand;
+import org.apache.hadoop.hive.ql.udf.UDFRegExpExtract;
+import org.apache.hadoop.hive.ql.udf.UDFRegExpReplace;
 import org.apache.hadoop.hive.ql.udf.UDFSecond;
 import org.apache.hadoop.hive.ql.udf.UDFSign;
 import org.apache.hadoop.hive.ql.udf.UDFSin;
@@ -253,6 +255,8 @@ public class Vectorizer implements PhysicalPlanResolver {
 
     supportedGenericUDFs.add(UDFLike.class);
     supportedGenericUDFs.add(GenericUDFRegExp.class);
+    supportedGenericUDFs.add(UDFRegExpExtract.class);
+    supportedGenericUDFs.add(UDFRegExpReplace.class);
     supportedGenericUDFs.add(UDFSubstr.class);
     supportedGenericUDFs.add(GenericUDFLTrim.class);
     supportedGenericUDFs.add(GenericUDFRTrim.class);
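
Both regexp functions are legacy UDF subclasses, so they reach the vectorizer wrapped in GenericUDFBridge; the fix therefore has two halves: unwrap the bridge and allow the underlying classes in VectorizationContext, and register the same classes in the Vectorizer allow-list above. A condensed sketch of the unwrap-and-check step, abbreviated from the first hunk rather than quoted literally:

    // Condensed from the VectorizationContext hunk above; not literal code.
    GenericUDFBridge bridge = (GenericUDFBridge) gudf;
    Class<? extends UDF> udfClass = bridge.getUdfClass();
    boolean vectorizable =
        udfClass.equals(UDFRegExpExtract.class)     // newly allowed here
        || udfClass.equals(UDFRegExpReplace.class)  // newly allowed here
        || udfClass.equals(UDFHex.class)
        || udfClass.equals(UDFConv.class);

The new vector_udf1.q test below then verifies via EXPLAIN that these expressions run with "Execution mode: vectorized".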

http://git-wip-us.apache.org/repos/asf/hive/blob/114e9f1c/ql/src/test/queries/clientpositive/vector_udf1.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/vector_udf1.q b/ql/src/test/queries/clientpositive/vector_udf1.q
new file mode 100644
index 0000000..2fcc69b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_udf1.q
@@ -0,0 +1,327 @@
+SET hive.vectorized.execution.enabled=true;
+set hive.fetch.task.conversion=none;
+
+drop table varchar_udf_1;
+
+create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC;
+insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1;
+
+-- UDFs with varchar support
+explain
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1;
+
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1;
+
+select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1;
+
+select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1;
+
+-- Scalar UDFs
+explain
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1;
+
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1;
+
+select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1;
+
+select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1;
+
+select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1;
+
+select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1;
+
+select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1;
+
+select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1;
+
+select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1;
+
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1;
+
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1;
+
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1;
+
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1;
+
+select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1;
+
+select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1;
+
+select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1;
+
+explain
+select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1;
+
+select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1;
+
+explain
+select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1;
+
+select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1;
+
+explain
+select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1;
+
+select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1;
+
+explain
+select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1;
+
+select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1;
+
+
+-- Aggregate Functions
+explain
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1;
+
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1;
+
+explain
+select
+  min(c2),
+  min(c4)
+from varchar_udf_1;
+
+select
+  min(c2),
+  min(c4)
+from varchar_udf_1;
+
+explain
+select
+  max(c2),
+  max(c4)
+from varchar_udf_1;
+
+select
+  max(c2),
+  max(c4)
+from varchar_udf_1;
+
+drop table varchar_udf_1;

http://git-wip-us.apache.org/repos/asf/hive/blob/114e9f1c/ql/src/test/results/clientpositive/vector_udf1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_udf1.q.out b/ql/src/test/results/clientpositive/vector_udf1.q.out
new file mode 100644
index 0000000..bb02ea7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_udf1.q.out
@@ -0,0 +1,1640 @@
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)) STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@varchar_udf_1
+PREHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: insert overwrite table varchar_udf_1
+  select key, value, key, value from src where key = '238' limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@varchar_udf_1
+POSTHOOK: Lineage: varchar_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: varchar_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: -- UDFs with varchar support
+explain
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- UDFs with varchar support
+explain
+select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: concat(c1, c2) (type: string), concat(c3, c4) (type: varchar(30)), (concat(c1, c2) = UDFToString(concat(c3, c4))) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  concat(c1, c2),
+  concat(c3, c4),
+  concat(c1, c2) = concat(c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238val_238	238val_238	true
+PREHOOK: query: explain
+select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: upper(c2) (type: string), upper(c4) (type: varchar(20)), (upper(c2) = UDFToString(upper(c4))) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  upper(c2),
+  upper(c4),
+  upper(c2) = upper(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+VAL_238	VAL_238	true
+PREHOOK: query: explain
+select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: lower(c2) (type: string), lower(c4) (type: varchar(20)), (lower(c2) = UDFToString(lower(c4))) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lower(c2),
+  lower(c4),
+  lower(c2) = lower(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Scalar UDFs
+explain
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Scalar UDFs
+explain
+select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ascii(c2) (type: int), ascii(c4) (type: int), (ascii(c2) = ascii(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  ascii(c2),
+  ascii(c4),
+  ascii(c2) = ascii(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+118	118	true
+PREHOOK: query: explain
+select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: concat_ws('|', c1, c2) (type: string), concat_ws('|', c3, c4) (type: string), (concat_ws('|', c1, c2) = concat_ws('|', c3, c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  concat_ws('|', c1, c2),
+  concat_ws('|', c3, c4),
+  concat_ws('|', c1, c2) = concat_ws('|', c3, c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238|val_238	238|val_238	true
+PREHOOK: query: explain
+select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: decode(encode(c2,'US-ASCII'),'US-ASCII') (type: string), decode(encode(c4,'US-ASCII'),'US-ASCII') (type: string), (decode(encode(c2,'US-ASCII'),'US-ASCII') = decode(encode(c4,'US-ASCII'),'US-ASCII')) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c4, 'US-ASCII'), 'US-ASCII'),
+  decode(encode(c2, 'US-ASCII'), 'US-ASCII') = decode(encode(c4, 'US-ASCII'), 'US-ASCII')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: explain
+select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: instr(c2, '_') (type: int), instr(c4, '_') (type: int), (instr(c2, '_') = instr(c4, '_')) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  instr(c2, '_'),
+  instr(c4, '_'),
+  instr(c2, '_') = instr(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+4	4	true
+PREHOOK: query: explain
+select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: length(c2) (type: int), length(c4) (type: int), (length(c2) = length(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  length(c2),
+  length(c4),
+  length(c2) = length(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+7	7	true
+PREHOOK: query: explain
+select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: 5 (type: int), 5 (type: int), true (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  locate('a', 'abcdabcd', 3),
+  locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3),
+  locate('a', 'abcdabcd', 3) = locate(cast('a' as varchar(1)), cast('abcdabcd' as varchar(10)), 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+5	5	true
+PREHOOK: query: explain
+select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: lpad(c2, 15, ' ') (type: string), lpad(c4, 15, ' ') (type: string), (lpad(c2, 15, ' ') = lpad(c4, 15, ' ')) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  lpad(c2, 15, ' '),
+  lpad(c4, 15, ' '),
+  lpad(c2, 15, ' ') = lpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+        val_238	        val_238	true
+PREHOOK: query: explain
+select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ltrim(c2) (type: string), ltrim(c4) (type: string), (ltrim(c2) = ltrim(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  ltrim(c2),
+  ltrim(c4),
+  ltrim(c2) = ltrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: c2 regexp 'val' (type: boolean), c4 regexp 'val' (type: boolean), (c2 regexp 'val' = c4 regexp 'val') (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  c2 regexp 'val',
+  c4 regexp 'val',
+  (c2 regexp 'val') = (c4 regexp 'val')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+true	true	true
+PREHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: regexp_extract(c2, 'val_([0-9]+)', 1) (type: string), regexp_extract(c4, 'val_([0-9]+)', 1) (type: string), (regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_extract(c2, 'val_([0-9]+)', 1),
+  regexp_extract(c4, 'val_([0-9]+)', 1),
+  regexp_extract(c2, 'val_([0-9]+)', 1) = regexp_extract(c4, 'val_([0-9]+)', 1)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+238	238	true
+PREHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: regexp_replace(c2, 'val', 'replaced') (type: string), regexp_replace(c4, 'val', 'replaced') (type: string), (regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  regexp_replace(c2, 'val', 'replaced'),
+  regexp_replace(c4, 'val', 'replaced'),
+  regexp_replace(c2, 'val', 'replaced') = regexp_replace(c4, 'val', 'replaced')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+replaced_238	replaced_238	true
+PREHOOK: query: explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: reverse(c2) (type: string), reverse(c4) (type: string), (reverse(c2) = reverse(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  reverse(c2),
+  reverse(c4),
+  reverse(c2) = reverse(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+832_lav	832_lav	true
+PREHOOK: query: explain
+select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: rpad(c2, 15, ' ') (type: string), rpad(c4, 15, ' ') (type: string), (rpad(c2, 15, ' ') = rpad(c4, 15, ' ')) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rpad(c2, 15, ' '),
+  rpad(c4, 15, ' '),
+  rpad(c2, 15, ' ') = rpad(c4, 15, ' ')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238        	val_238        	true
+PREHOOK: query: explain
+select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: rtrim(c2) (type: string), rtrim(c4) (type: string), (rtrim(c2) = rtrim(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  rtrim(c2),
+  rtrim(c4),
+  rtrim(c2) = rtrim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: explain
+select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: sentences('See spot run.  See jane run.') (type: array<array<string>>), sentences('See spot run.  See jane run.') (type: array<array<string>>)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  sentences('See spot run.  See jane run.'),
+  sentences(cast('See spot run.  See jane run.' as varchar(50)))
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+[["See","spot","run"],["See","jane","run"]]	[["See","spot","run"],["See","jane","run"]]
+PREHOOK: query: explain
+select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: split(c2, '_') (type: array<string>), split(c4, '_') (type: array<string>)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  split(c2, '_'),
+  split(c4, '_')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+["val","238"]	["val","238"]
+PREHOOK: query: explain
+select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>), str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 1 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select 
+  str_to_map('a:1,b:2,c:3',',',':'),
+  str_to_map(cast('a:1,b:2,c:3' as varchar(20)),',',':')
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"b":"2","a":"1","c":"3"}	{"b":"2","a":"1","c":"3"}
+PREHOOK: query: explain
+select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: substr(c2, 1, 3) (type: string), substr(c4, 1, 3) (type: string), (substr(c2, 1, 3) = substr(c4, 1, 3)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  substr(c2, 1, 3),
+  substr(c4, 1, 3),
+  substr(c2, 1, 3) = substr(c4, 1, 3)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val	val	true
+PREHOOK: query: explain
+select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: trim(c2) (type: string), trim(c4) (type: string), (trim(c2) = trim(c4)) (type: boolean)
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 1
+                Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  trim(c2),
+  trim(c4),
+  trim(c2) = trim(c4)
+from varchar_udf_1 limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238	true
+PREHOOK: query: -- Aggregate Functions
+explain
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Aggregate Functions
+explain
+select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: c2 (type: string), c4 (type: varchar(20))
+              outputColumnNames: _col0, _col2
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: compute_stats(_col0, 16), compute_stats(_col2, 16)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  value expressions: _col0 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>), _col1 (type: struct<columntype:string,maxlength:bigint,sumlength:bigint,count:bigint,countnulls:bigint,bitvector:string,numbitvectors:int>)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0), compute_stats(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  compute_stats(c2, 16),
+  compute_stats(c4, 16)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}	{"columntype":"String","maxlength":7,"avglength":7.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{3}{2}{3}{1}{0}{2}{0}{1}{0}{0}{1}{3}{2}{0}{3}"}
+PREHOOK: query: explain
+select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: c2 (type: string), c4 (type: varchar(20))
+              outputColumnNames: c2, c4
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: min(c2), min(c4)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: varchar(20))
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0), min(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  min(c2),
+  min(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: explain
+select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: varchar_udf_1
+            Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: c2 (type: string), c4 (type: varchar(20))
+              outputColumnNames: c2, c4
+              Statistics: Num rows: 1 Data size: 356 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: max(c2), max(c4)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: varchar(20))
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0), max(VALUE._col1)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+POSTHOOK: query: select
+  max(c2),
+  max(c4)
+from varchar_udf_1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@varchar_udf_1
+#### A masked pattern was here ####
+val_238	val_238
+PREHOOK: query: drop table varchar_udf_1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@varchar_udf_1
+PREHOOK: Output: default@varchar_udf_1
+POSTHOOK: query: drop table varchar_udf_1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@varchar_udf_1
+POSTHOOK: Output: default@varchar_udf_1
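
Taken together, these golden files exercise a single pattern: each string UDF is applied to a string column (c2) and to its varchar twin (c4), and a third output column asserts that the two results compare equal. Reading the plans, upper() and lower() preserve the varchar(20) type of c4, most of the remaining UDFs return plain string (or int/boolean/array/map as appropriate), locate() over constant arguments is folded to literals at compile time, and only a subset of the UDFs run with "Execution mode: vectorized". A minimal sketch of the setup, with column types inferred from the EXPLAIN output above (the actual create/insert statements appear earlier in the .q file and may differ):

  -- Hypothetical reconstruction; c2 is string and c4 is varchar(20) per the
  -- plans, and src is assumed to be the standard Hive test table.
  create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20));
  insert overwrite table varchar_udf_1
    select key, value, key, value from src where key = '238' limit 1;

  -- Every check in this file then follows the same shape:
  select upper(c2), upper(c4), upper(c2) = upper(c4) from varchar_udf_1 limit 1;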


[33/51] [abbrv] hive git commit: HIVE-13198: Authorization issues with cascading views (Pengcheng Xiong, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13198: Authorization issues with cascading views (Pengcheng Xiong, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ca165db8
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ca165db8
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ca165db8

Branch: refs/heads/llap
Commit: ca165db8dd7846cc88e0903d061a9713551c3a72
Parents: 114e9f1
Author: Pengcheng Xiong <px...@apache.org>
Authored: Sun Mar 13 12:04:29 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Sun Mar 13 12:04:29 2016 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/ql/Driver.java  | 73 ++++++++++----------
 .../hadoop/hive/ql/exec/TableScanOperator.java  | 10 +++
 .../calcite/reloperators/HiveTableScan.java     | 23 ++++--
 .../calcite/translator/ASTBuilder.java          |  8 +++
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  3 +-
 .../org/apache/hadoop/hive/ql/parse/QB.java     | 16 +++++
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  | 32 +++++++--
 .../clientnegative/authorization_view_5.q       | 16 +++++
 .../clientnegative/authorization_view_6.q       | 18 +++++
 .../clientnegative/authorization_view_7.q       | 18 +++++
 .../authorization_view_disable_cbo_5.q          | 17 +++++
 .../authorization_view_disable_cbo_6.q          | 19 +++++
 .../authorization_view_disable_cbo_7.q          | 19 +++++
 .../clientpositive/authorization_view_2.q       | 16 +++++
 .../clientpositive/authorization_view_3.q       | 18 +++++
 .../clientpositive/authorization_view_4.q       | 18 +++++
 .../authorization_view_disable_cbo_2.q          | 17 +++++
 .../authorization_view_disable_cbo_3.q          | 19 +++++
 .../authorization_view_disable_cbo_4.q          | 19 +++++
 .../clientnegative/authorization_view_5.q.out   | 35 ++++++++++
 .../clientnegative/authorization_view_6.q.out   | 45 ++++++++++++
 .../clientnegative/authorization_view_7.q.out   | 45 ++++++++++++
 .../authorization_view_disable_cbo_5.q.out      | 35 ++++++++++
 .../authorization_view_disable_cbo_6.q.out      | 45 ++++++++++++
 .../authorization_view_disable_cbo_7.q.out      | 45 ++++++++++++
 .../clientpositive/authorization_view_2.q.out   | 66 ++++++++++++++++++
 .../clientpositive/authorization_view_3.q.out   | 62 +++++++++++++++++
 .../clientpositive/authorization_view_4.q.out   | 64 +++++++++++++++++
 .../authorization_view_disable_cbo_2.q.out      | 66 ++++++++++++++++++
 .../authorization_view_disable_cbo_3.q.out      | 62 +++++++++++++++++
 .../authorization_view_disable_cbo_4.q.out      | 64 +++++++++++++++++
 .../results/clientpositive/subquery_views.q.out |  8 +--
 .../clientpositive/tez/explainuser_1.q.out      |  2 +-
 33 files changed, 971 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index b50c5a2..f0fda05 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -733,14 +733,14 @@ public class Driver implements CommandProcessor {
         }
       }
 
+      // column authorization is checked through table scan operators.
       getTablePartitionUsedColumns(op, sem, tab2Cols, part2Cols, tableUsePartLevelAuth);
 
-
-
       // cache the results for table authorization
       Set<String> tableAuthChecked = new HashSet<String>();
       for (ReadEntity read : inputs) {
-        if (read.isDummy() || read.isPathType()) {
+        // if the read is not direct, we do not need to check its authorization.
+        if (read.isDummy() || read.isPathType() || !read.isDirect()) {
           continue;
         }
         if (read.getType() == Entity.Type.DATABASE) {
@@ -796,46 +796,49 @@ public class Driver implements CommandProcessor {
     // for a select or create-as-select query, populate the partition to column
     // (par2Cols) or
     // table to columns mapping (tab2Cols)
-    if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
-        || op.equals(HiveOperation.QUERY)) {
+    if (op.equals(HiveOperation.CREATETABLE_AS_SELECT) || op.equals(HiveOperation.QUERY)) {
       SemanticAnalyzer querySem = (SemanticAnalyzer) sem;
       ParseContext parseCtx = querySem.getParseContext();
 
-      for (Map.Entry<String, TableScanOperator> topOpMap : querySem.getParseContext().getTopOps().entrySet()) {
-        TableScanOperator topOp = topOpMap.getValue();
-        TableScanOperator tableScanOp = topOp;
-        Table tbl = tableScanOp.getConf().getTableMetadata();
-        List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
-        List<FieldSchema> columns = tbl.getCols();
-        List<String> cols = new ArrayList<String>();
-        for (int i = 0; i < neededColumnIds.size(); i++) {
-          cols.add(columns.get(neededColumnIds.get(i)).getName());
-        }
-        //map may not contain all sources, since input list may have been optimized out
-        //or non-existent tho such sources may still be referenced by the TableScanOperator
-        //if it's null then the partition probably doesn't exist so let's use table permission
-        if (tbl.isPartitioned() &&
-            Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
-          String alias_id = topOpMap.getKey();
-
-          PrunedPartitionList partsList = PartitionPruner.prune(tableScanOp,
-              parseCtx, alias_id);
-          Set<Partition> parts = partsList.getPartitions();
-          for (Partition part : parts) {
-            List<String> existingCols = part2Cols.get(part);
+      for (Map.Entry<String, TableScanOperator> topOpMap : querySem.getParseContext().getTopOps()
+          .entrySet()) {
+        TableScanOperator tableScanOp = topOpMap.getValue();
+        if (!tableScanOp.isInsideView()) {
+          Table tbl = tableScanOp.getConf().getTableMetadata();
+          List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
+          List<FieldSchema> columns = tbl.getCols();
+          List<String> cols = new ArrayList<String>();
+          for (int i = 0; i < neededColumnIds.size(); i++) {
+            cols.add(columns.get(neededColumnIds.get(i)).getName());
+          }
+          // The map may not contain all sources, since the input list may
+          // have been optimized out or be non-existent, though such sources
+          // may still be referenced by the TableScanOperator.
+          // If the entry is null, the partition probably doesn't exist,
+          // so fall back to table-level permissions.
+          if (tbl.isPartitioned()
+              && Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
+            String alias_id = topOpMap.getKey();
+
+            PrunedPartitionList partsList = PartitionPruner.prune(tableScanOp, parseCtx, alias_id);
+            Set<Partition> parts = partsList.getPartitions();
+            for (Partition part : parts) {
+              List<String> existingCols = part2Cols.get(part);
+              if (existingCols == null) {
+                existingCols = new ArrayList<String>();
+              }
+              existingCols.addAll(cols);
+              part2Cols.put(part, existingCols);
+            }
+          } else {
+            List<String> existingCols = tab2Cols.get(tbl);
             if (existingCols == null) {
               existingCols = new ArrayList<String>();
             }
             existingCols.addAll(cols);
-            part2Cols.put(part, existingCols);
-          }
-        } else {
-          List<String> existingCols = tab2Cols.get(tbl);
-          if (existingCols == null) {
-            existingCols = new ArrayList<String>();
+            tab2Cols.put(tbl, existingCols);
           }
-          existingCols.addAll(cols);
-          tab2Cols.put(tbl, existingCols);
         }
       }
     }
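
For readers skimming the hunk above: the loop resolves each TableScanOperator's
needed column IDs against the table schema and accumulates them per table
(tab2Cols) or per partition (part2Cols), skipping scans that sit inside views.
A minimal sketch of the ID-to-name resolution, with plain Java collections
standing in for Hive's Table/FieldSchema types:

    import java.util.ArrayList;
    import java.util.List;

    class NeededColumnsSketch {
      // Mirrors tbl.getCols() + tableScanOp.getNeededColumnIDs() above:
      // pick the names of only the columns the scan actually reads.
      static List<String> neededColumnNames(List<String> schemaCols,
          List<Integer> neededIds) {
        List<String> cols = new ArrayList<>();
        for (Integer id : neededIds) {
          cols.add(schemaCols.get(id));
        }
        return cols;
      }
    }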

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
index 5253521..1b3cc82 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java
@@ -65,6 +65,8 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
 
   private transient int rowLimit = -1;
   private transient int currCount = 0;
+  // insideView indicates whether this TableScan is inside a view.
+  private transient boolean insideView;
 
   private String defaultPartitionName;
 
@@ -362,4 +364,12 @@ public class TableScanOperator extends Operator<TableScanDesc> implements
     return ts;
   }
 
+  public boolean isInsideView() {
+    return insideView;
+  }
+
+  public void setInsideView(boolean insideView) {
+    this.insideView = insideView;
+  }
+
 }
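
The flag is transient plan state: it is set during semantic analysis (see the
SemanticAnalyzer hunk below) and consumed only on the authorization path in
Driver. A hedged sketch of the consumer side, with a hypothetical collection of
top scan operators:

    // Hypothetical loop mirroring how Driver filters inside-view scans.
    for (TableScanOperator ts : topScanOps) {
      if (ts.isInsideView()) {
        continue; // column authorization is skipped for scans inside views
      }
      // ... resolve needed columns and record them for authorization ...
    }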

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
index e9e9d0b..c9505e4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java
@@ -64,6 +64,8 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
   private final String concatQbIDAlias;
   private final boolean useQBIdInDigest;
   private final ImmutableSet<Integer> viurtualOrPartColIndxsInTS;
+  // insideView indicates whether this TableScan is inside a view.
+  private final boolean insideView;
 
   public String getTableAlias() {
     return tblAlias;
@@ -86,12 +88,12 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
    *          HiveDB table
    */
   public HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
-      String alias, String concatQbIDAlias, boolean useQBIdInDigest) {
-    this(cluster, traitSet, table, alias, concatQbIDAlias, table.getRowType(), useQBIdInDigest);
+      String alias, String concatQbIDAlias, boolean useQBIdInDigest, boolean insideView) {
+    this(cluster, traitSet, table, alias, concatQbIDAlias, table.getRowType(), useQBIdInDigest, insideView);
   }
 
   private HiveTableScan(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
-      String alias, String concatQbIDAlias, RelDataType newRowtype, boolean useQBIdInDigest) {
+      String alias, String concatQbIDAlias, RelDataType newRowtype, boolean useQBIdInDigest, boolean insideView) {
     super(cluster, TraitsUtil.getDefaultTraitSet(cluster), table);
     assert getConvention() == HiveRelNode.CONVENTION;
     this.tblAlias = alias;
@@ -101,6 +103,7 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
     this.neededColIndxsFrmReloptHT = colIndxPair.getKey();
     this.viurtualOrPartColIndxsInTS = colIndxPair.getValue();
     this.useQBIdInDigest = useQBIdInDigest;
+    this.insideView = insideView;
   }
 
   @Override
@@ -118,7 +121,7 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
    */
   public HiveTableScan copy(RelDataType newRowtype) {
     return new HiveTableScan(getCluster(), getTraitSet(), ((RelOptHiveTable) table), this.tblAlias, this.concatQbIDAlias,
-            newRowtype, this.useQBIdInDigest);
+            newRowtype, this.useQBIdInDigest, this.insideView);
   }
 
   @Override
@@ -237,4 +240,16 @@ public class HiveTableScan extends TableScan implements HiveRelNode {
     return new Pair<ImmutableList<Integer>, ImmutableSet<Integer>>(neededColIndxsFrmReloptHT,
         viurtualOrPartColIndxsInTS);
   }
+
+  public boolean isInsideView() {
+    return insideView;
+  }
+
+  // We need to include isInsideView in the digest to differentiate direct
+  // tables from tables inside views. Otherwise, Calcite will treat them as the same.
+  public String computeDigest() {
+    String digest = super.computeDigest();
+    return digest + "[" + this.isInsideView() + "]";
+  }
+
 }
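
Since Calcite identifies RelNodes by digest, appending the flag keeps a direct
scan and an inside-view scan of the same table from being collapsed into one
node. Illustratively (digest strings are Calcite-internal; only the bracketed
suffix comes from the override above):

    HiveTableScan(table=[[default, src]])[false]   // direct scan
    HiveTableScan(table=[[default, src]])[true]    // scan expanded from a view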

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
index d39744b..682d0cb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTBuilder.java
@@ -62,6 +62,14 @@ class ASTBuilder {
         ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
             .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
             .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName()));
+    // We need to carry the insideView information from Calcite into the AST.
+    if (((HiveTableScan) scan).isInsideView()) {
+      b.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTIES, "TOK_TABLEPROPERTIES").add(
+          ASTBuilder.construct(HiveParser.TOK_TABLEPROPLIST, "TOK_TABLEPROPLIST").add(
+              ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
+                  .add(HiveParser.StringLiteral, "\"insideView\"")
+                  .add(HiveParser.StringLiteral, "\"TRUE\""))));
+    }
 
     // NOTE: Calcite considers tbls to be equal if their names are the same. Hence
     // we need to provide Calcite the fully qualified table name (dbname.tblname)
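
The quotes embedded in the StringLiteral tokens above look odd but are
deliberate: table-property literals presumably arrive quoted from the parser
and are unescaped on the way back. The property round-trips through phase 1;
the SemanticAnalyzer hunk below strips it back out with, in effect:

    // Sketch of the read-back side (see the SemanticAnalyzer hunk below);
    // getProps here stands in for DDLSemanticAnalyzer.getProps.
    Map<String, String> props = getProps(propsAst);
    if ("TRUE".equals(props.get("insideView"))) {
      qb.getAliasInsideView().add(alias.toLowerCase());
    }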

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index c36aa9d..f8860b7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -1706,7 +1706,8 @@ public class CalcitePlanner extends SemanticAnalyzer {
         tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
             null == tableAlias ? tabMetaData.getTableName() : tableAlias,
             getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,
-                HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP));
+                HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP), qb.isInsideView()
+                || qb.getAliasInsideView().contains(tableAlias.toLowerCase()));
 
         // 6. Add Schema(RR) to RelNode-Schema map
         ImmutableMap<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rr,
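
A scan is flagged as inside a view if either the whole query block was produced
by view expansion or this particular alias was marked via the AST property.
Restated as a hypothetical helper:

    // Hypothetical predicate restating the last argument passed above.
    static boolean scanIsInsideView(QB qb, String tableAlias) {
      return qb.isInsideView()
          || qb.getAliasInsideView().contains(tableAlias.toLowerCase());
    }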

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
index 91352b2..cf3bbf0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -59,6 +60,8 @@ public class QB {
   private CreateTableDesc tblDesc = null; // table descriptor of the final
   private CreateTableDesc directoryDesc = null ;
   private List<Path> encryptedTargetTablePaths;
+  private boolean insideView;
+  private Set<String> aliasInsideView;
 
   // used by PTFs
   /*
@@ -123,6 +126,7 @@ public class QB {
     ptfNodeToSpec = new LinkedHashMap<ASTNode, PTFInvocationSpec>();
     destToWindowingSpec = new LinkedHashMap<String, WindowingSpec>();
     id = getAppendedAliasFromId(outer_id, alias);
+    aliasInsideView = new HashSet<>();
   }
 
   // For sub-queries, the id. and alias should be appended since same aliases can be re-used
@@ -416,4 +420,16 @@ public class QB {
     return viewAliasToViewSchema;
   }
 
+  public boolean isInsideView() {
+    return insideView;
+  }
+
+  public void setInsideView(boolean insideView) {
+    this.insideView = insideView;
+  }
+
+  public Set<String> getAliasInsideView() {
+    return aliasInsideView;
+  }
+
 }
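
Typical use of the new QB state, assuming the constructor signature shown
elsewhere in this patch (id, alias, isSubQuery):

    QB qb = new QB("1", "v2", true);
    qb.setInsideView(true);               // whole QB came from view expansion
    qb.getAliasInsideView().add("src");   // or only one alias did
    assert qb.isInsideView();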

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 633c212..2dcb6d6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -435,14 +435,19 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return ctx.getOpContext();
   }
 
-  @SuppressWarnings("nls")
   public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias)
       throws SemanticException {
+    doPhase1QBExpr(ast, qbexpr, id, alias, false);
+  }
+  @SuppressWarnings("nls")
+  public void doPhase1QBExpr(ASTNode ast, QBExpr qbexpr, String id, String alias, boolean insideView)
+      throws SemanticException {
 
     assert (ast.getToken() != null);
     switch (ast.getToken().getType()) {
     case HiveParser.TOK_QUERY: {
       QB qb = new QB(id, alias, true);
+      qb.setInsideView(insideView);
       Phase1Ctx ctx_1 = initPhase1Ctx();
       doPhase1(ast, qb, ctx_1, null);
 
@@ -456,14 +461,14 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       assert (ast.getChild(0) != null);
       QBExpr qbexpr1 = new QBExpr(alias + SUBQUERY_TAG_1);
       doPhase1QBExpr((ASTNode) ast.getChild(0), qbexpr1, id + SUBQUERY_TAG_1,
-          alias + SUBQUERY_TAG_1);
+          alias + SUBQUERY_TAG_1, insideView);
       qbexpr.setQBExpr1(qbexpr1);
 
       // query 2
       assert (ast.getChild(1) != null);
       QBExpr qbexpr2 = new QBExpr(alias + SUBQUERY_TAG_2);
       doPhase1QBExpr((ASTNode) ast.getChild(1), qbexpr2, id + SUBQUERY_TAG_2,
-          alias + SUBQUERY_TAG_2);
+          alias + SUBQUERY_TAG_2, insideView);
       qbexpr.setQBExpr2(qbexpr2);
     }
       break;
@@ -656,6 +661,10 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     if (propsIndex >= 0) {
       Tree propsAST = tabref.getChild(propsIndex);
       Map<String, String> props = DDLSemanticAnalyzer.getProps((ASTNode) propsAST.getChild(0));
+      // This property is set by Calcite when it regenerates the AST (see ASTBuilder).
+      if ("TRUE".equals(props.get("insideView"))) {
+        qb.getAliasInsideView().add(alias.toLowerCase());
+      }
       qb.setTabProps(alias, props);
     }
 
@@ -730,6 +739,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     }
     // Insert this map into the stats
     qb.setTabAlias(alias, tabIdName);
+    if (qb.isInsideView()) {
+      qb.getAliasInsideView().add(alias.toLowerCase());
+    }
     qb.addAlias(alias);
 
     qb.getParseInfo().setSrcForAlias(alias, tableTree);
@@ -1895,8 +1907,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         }
         replaceViewReferenceWithDefinition(qb, tab, tabName, alias);
         // This is the last time we'll see the Table objects for views, so add it to the inputs
-        // now
-        ReadEntity viewInput = new ReadEntity(tab, parentInput);
+        // now. isInsideView indicates whether this view is embedded in another view.
+        ReadEntity viewInput = new ReadEntity(tab, parentInput, !qb.isInsideView());
         viewInput = PlanUtils.addInput(inputs, viewInput);
         aliasToViewInfo.put(alias, new ObjectPair<String, ReadEntity>(fullViewName, viewInput));
         viewAliasToInput.put(getAliasId(alias, qb), viewInput);
@@ -2303,8 +2315,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       throw new SemanticException(sb.toString(), e);
     }
     QBExpr qbexpr = new QBExpr(alias);
-    doPhase1QBExpr(viewTree, qbexpr, qb.getId(), alias);
-    if (!this.skipAuthorization()
+    doPhase1QBExpr(viewTree, qbexpr, qb.getId(), alias, true);
+    // if authorization is skipped, skip checking;
+    // if the view is inside another view, skip checking;
+    // if the authorization flag is not enabled, skip checking.
+    if (!this.skipAuthorization() && !qb.isInsideView()
         && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
       qb.rewriteViewToSubq(alias, tab_name, qbexpr, tab);
     }
@@ -9630,6 +9645,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
       top = (TableScanOperator) putOpInsertMap(OperatorFactory.get(getOpContext(), tsDesc,
           new RowSchema(rwsch.getColumnInfos())), rwsch);
 
+      // Set insideView so that we can skip column authorization for this table scan.
+      top.setInsideView(qb.isInsideView() || qb.getAliasInsideView().contains(alias.toLowerCase()));
+
       // Add this to the list of top operators - we always start from a table
       // scan
       topOps.put(alias_id, top);
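
The new doPhase1QBExpr overload threads insideView through the recursion so
that every QB under a view expansion, including both sides of a UNION, inherits
the flag. A stripped-down sketch of the propagation pattern (hypothetical
Node/QB types, not the actual Hive signatures):

    void phase1QBExpr(Node ast, String id, String alias, boolean insideView) {
      if (ast.isQuery()) {
        QB qb = new QB(id, alias, true);
        qb.setInsideView(insideView);     // mark the query block
        // ... doPhase1 ...
      } else {                            // UNION: recurse with the same flag
        phase1QBExpr(ast.child(0), id + "-1", alias + "-1", insideView);
        phase1QBExpr(ast.child(1), id + "-2", alias + "-2", insideView);
      }
    }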

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_5.q b/ql/src/test/queries/clientnegative/authorization_view_5.q
new file mode 100644
index 0000000..3b042c3
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_5.q
@@ -0,0 +1,16 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table not grant to user
+
+--grant select on table v2 to user hive_test_user;
+
+select * from v2 order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_6.q b/ql/src/test/queries/clientnegative/authorization_view_6.q
new file mode 100644
index 0000000..38873a7
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_6.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+--grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_7.q b/ql/src/test/queries/clientnegative/authorization_view_7.q
new file mode 100644
index 0000000..3740bf9
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_7.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+--grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_5.q b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_5.q
new file mode 100644
index 0000000..a0070c2
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_5.q
@@ -0,0 +1,17 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table not grant to user
+
+--grant select on table v2 to user hive_test_user;
+
+select * from v2 order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_6.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_6.q b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_6.q
new file mode 100644
index 0000000..bc0d547
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_6.q
@@ -0,0 +1,19 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+--grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_7.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_7.q b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_7.q
new file mode 100644
index 0000000..51b453d
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_view_disable_cbo_7.q
@@ -0,0 +1,19 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+--grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_2.q b/ql/src/test/queries/clientpositive/authorization_view_2.q
new file mode 100644
index 0000000..8e6138d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_2.q
@@ -0,0 +1,16 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+select * from v2 order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_3.q b/ql/src/test/queries/clientpositive/authorization_view_3.q
new file mode 100644
index 0000000..aaf971e
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_3.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_4.q b/ql/src/test/queries/clientpositive/authorization_view_4.q
new file mode 100644
index 0000000..53ce350
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_4.q
@@ -0,0 +1,18 @@
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q
new file mode 100644
index 0000000..03d4387
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_2.q
@@ -0,0 +1,17 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+select * from v2 order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q
new file mode 100644
index 0000000..44f9503
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_3.q
@@ -0,0 +1,19 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src_autho_test;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q
new file mode 100644
index 0000000..40424c4
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/authorization_view_disable_cbo_4.q
@@ -0,0 +1,19 @@
+set hive.cbo.enable=false;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
+
+create table src_autho_test as select * from src;
+
+create view v1 as select * from src;
+
+create view v2 as select * from v1;
+
+set hive.security.authorization.enabled=true;
+
+--table grant to user
+
+grant select on table v2 to user hive_test_user;
+
+grant select(key) on table src_autho_test to user hive_test_user;
+
+select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10;
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_5.q.out b/ql/src/test/results/clientnegative/authorization_view_5.q.out
new file mode 100644
index 0000000..a185146
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_5.q.out
@@ -0,0 +1,35 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:v2, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_6.q.out b/ql/src/test/results/clientnegative/authorization_view_6.q.out
new file mode 100644
index 0000000..6584497
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_6.q.out
@@ -0,0 +1,45 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_7.q.out b/ql/src/test/results/clientnegative/authorization_view_7.q.out
new file mode 100644
index 0000000..e7c93f7
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_7.q.out
@@ -0,0 +1,45 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_disable_cbo_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_5.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_5.q.out
new file mode 100644
index 0000000..a185146
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_5.q.out
@@ -0,0 +1,35 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:v2, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out
new file mode 100644
index 0000000..6584497
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out
@@ -0,0 +1,45 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out
new file mode 100644
index 0000000..e7c93f7
--- /dev/null
+++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out
@@ -0,0 +1,45 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details.

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_2.q.out b/ql/src/test/results/clientpositive/authorization_view_2.q.out
new file mode 100644
index 0000000..0b61663
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_2.q.out
@@ -0,0 +1,66 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: select * from v2 order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from v2 order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_3.q.out b/ql/src/test/results/clientpositive/authorization_view_3.q.out
new file mode 100644
index 0000000..cbc40b5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_3.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_4.q.out b/ql/src/test/results/clientpositive/authorization_view_4.q.out
new file mode 100644
index 0000000..f859923
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_4.q.out
@@ -0,0 +1,64 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out
new file mode 100644
index 0000000..0b61663
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out
@@ -0,0 +1,66 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: select * from v2 order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select * from v2 order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####
+0	val_0
+0	val_0
+0	val_0
+10	val_10
+100	val_100
+100	val_100
+103	val_103
+103	val_103
+104	val_104
+104	val_104

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out
new file mode 100644
index 0000000..cbc40b5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out
@@ -0,0 +1,62 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src_autho_test
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src_autho_test
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out
new file mode 100644
index 0000000..f859923
--- /dev/null
+++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out
@@ -0,0 +1,64 @@
+PREHOOK: query: create table src_autho_test as select * from src
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: create table src_autho_test as select * from src
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@src_autho_test
+POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: create view v1 as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v1
+POSTHOOK: query: create view v1 as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v1
+PREHOOK: query: create view v2 as select * from v1
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Input: default@v1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v2
+POSTHOOK: query: create view v2 as select * from v1
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@v1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v2
+PREHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@v2
+POSTHOOK: query: --table grant to user
+
+grant select on table v2 to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@v2
+PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+PREHOOK: type: GRANT_PRIVILEGE
+PREHOOK: Output: default@src_autho_test
+POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user
+POSTHOOK: type: GRANT_PRIVILEGE
+POSTHOOK: Output: default@src_autho_test
+PREHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+PREHOOK: Input: default@src_autho_test
+PREHOOK: Input: default@v1
+PREHOOK: Input: default@v2
+#### A masked pattern was here ####
+POSTHOOK: query: select v2.key from v2 join (select key from src_autho_test)subq on v2.value=subq.key order by key limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Input: default@src_autho_test
+POSTHOOK: Input: default@v1
+POSTHOOK: Input: default@v2
+#### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/subquery_views.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out
index 6ab2ad0..fab919d 100644
--- a/ql/src/test/results/clientpositive/subquery_views.q.out
+++ b/ql/src/test/results/clientpositive/subquery_views.q.out
@@ -136,7 +136,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: b
+            alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((value > 'val_11') and (key is null or value is null)) (type: boolean)
@@ -217,7 +217,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col0 (type: string)
               Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
           TableScan
-            alias: b
+            alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((value > 'val_11') and (key < '11')) (type: boolean)
@@ -291,7 +291,7 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: b
+            alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((value > 'val_11') and (key is null or value is null)) (type: boolean)
@@ -372,7 +372,7 @@ STAGE PLANS:
               Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col0 (type: string)
               Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
           TableScan
-            alias: b
+            alias: a
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: ((value > 'val_11') and (key < '11')) (type: boolean)

http://git-wip-us.apache.org/repos/asf/hive/blob/ca165db8/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
index a3ff85c..b7a8174 100644
--- a/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/tez/explainuser_1.q.out
@@ -2001,7 +2001,7 @@ Stage-0
                 Filter Operator [FIL_16] (rows=166 width=178)
                   predicate:((value > 'val_9') and key is not null)
                   TableScan [TS_3] (rows=500 width=178)
-                    default@src_cbo,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
+                    default@src_cbo,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"]
 
 PREHOOK: query: explain select * 
 from (select * 


[38/51] [abbrv] hive git commit: HIVE-13185 : orc.ReaderImpl.ensureOrcFooter() method fails on small text files with IndexOutOfBoundsException (Illya Yalovyy, reviewed by Sergey Shelukhin)

Posted by jd...@apache.org.
HIVE-13185 : orc.ReaderImpl.ensureOrcFooter() method fails on small text files with IndexOutOfBoundsException (Illya Yalovyy, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f07fdfbc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f07fdfbc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f07fdfbc

Branch: refs/heads/llap
Commit: f07fdfbc7226364cdb21784b308bd2adfe114309
Parents: b6af012
Author: Sergey Shelukhin <se...@apache.org>
Authored: Mon Mar 14 16:57:57 2016 -0700
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Mon Mar 14 16:57:57 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       |  15 +-
 .../hadoop/hive/ql/io/orc/TestReaderImpl.java   | 151 +++++++++++++++++++
 2 files changed, 159 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/f07fdfbc/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 773c2b1..a031a92 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -256,21 +256,22 @@ public class ReaderImpl implements Reader {
                                       Path path,
                                       int psLen,
                                       ByteBuffer buffer) throws IOException {
-    int len = OrcFile.MAGIC.length();
-    if (psLen < len + 1) {
+    int magicLength = OrcFile.MAGIC.length();
+    int fullLength = magicLength + 1;
+    if (psLen < fullLength || buffer.remaining() < fullLength) {
       throw new FileFormatException("Malformed ORC file " + path +
           ". Invalid postscript length " + psLen);
     }
-    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - 1 - len;
+    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - fullLength;
     byte[] array = buffer.array();
     // now look for the magic string at the end of the postscript.
-    if (!Text.decode(array, offset, len).equals(OrcFile.MAGIC)) {
+    if (!Text.decode(array, offset, magicLength).equals(OrcFile.MAGIC)) {
       // If it isn't there, this may be the 0.11.0 version of ORC.
       // Read the first 3 bytes of the file to check for the header
-      byte[] header = new byte[len];
-      in.readFully(0, header, 0, len);
+      byte[] header = new byte[magicLength];
+      in.readFully(0, header, 0, magicLength);
       // if it isn't there, this isn't an ORC file
-      if (!Text.decode(header, 0 , len).equals(OrcFile.MAGIC)) {
+      if (!Text.decode(header, 0 , magicLength).equals(OrcFile.MAGIC)) {
         throw new FileFormatException("Malformed ORC file " + path +
             ". Invalid postscript.");
       }
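
The reworked guard above rejects any buffer that cannot hold the magic string plus the one-byte postscript length before it ever indexes into the array, which is exactly the case that used to raise IndexOutOfBoundsException on tiny non-ORC files; the new TestReaderImpl below drives the real method through the same scenarios. As a self-contained sketch of the guard (the MAGIC constant and class names here are illustrative, not the actual Hive API):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FooterGuardSketch {
  private static final String MAGIC = "ORC";

  // Rejects buffers too short to hold MAGIC plus the trailing
  // postscript-length byte, instead of overrunning the array.
  static boolean endsWithMagic(int psLen, ByteBuffer buffer) {
    int magicLength = MAGIC.length();
    int fullLength = magicLength + 1;
    if (psLen < fullLength || buffer.remaining() < fullLength) {
      throw new IllegalArgumentException("Invalid postscript length " + psLen);
    }
    int offset = buffer.arrayOffset() + buffer.position() + buffer.limit() - fullLength;
    String tail = new String(buffer.array(), offset, magicLength, StandardCharsets.UTF_8);
    return MAGIC.equals(tail);
  }

  public static void main(String[] args) {
    byte[] tiny = "1".getBytes(StandardCharsets.UTF_8);   // one-byte "file"
    ByteBuffer buf = ByteBuffer.wrap(tiny);
    int psLen = buf.get(tiny.length - 1) & 0xff;
    try {
      endsWithMagic(psLen, buf);
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected cleanly: " + expected.getMessage());
    }
  }
}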

http://git-wip-us.apache.org/repos/asf/hive/blob/f07fdfbc/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestReaderImpl.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestReaderImpl.java
new file mode 100644
index 0000000..e0199d6
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestReaderImpl.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2016 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.io.orc;
+
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.CharacterCodingException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.hive.ql.io.FileFormatException;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.rules.ExpectedException;
+
+public class TestReaderImpl {
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  private final Path path = new Path("test-file.orc");
+  private FSDataInputStream in;
+  private int psLen;
+  private ByteBuffer buffer;
+
+  @Before
+  public void setup() {
+    in = null;
+  }
+
+  @Test
+  public void testEnsureOrcFooterSmallTextFile() throws IOException {
+    prepareTestCase("1".getBytes());
+    thrown.expect(FileFormatException.class);
+    ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
+  }
+
+  @Test
+  public void testEnsureOrcFooterLargeTextFile() throws IOException {
+    prepareTestCase("This is Some Text File".getBytes());
+    thrown.expect(FileFormatException.class);
+    ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
+  }
+
+  @Test
+  public void testEnsureOrcFooter011ORCFile() throws IOException {
+    prepareTestCase(composeContent(OrcFile.MAGIC, "FOOTER"));
+    ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
+  }
+
+  @Test
+  public void testEnsureOrcFooterCorrectORCFooter() throws IOException {
+    prepareTestCase(composeContent("",OrcFile.MAGIC));
+    ReaderImpl.ensureOrcFooter(in, path, psLen, buffer);
+  }
+
+  private void prepareTestCase(byte[] bytes) {
+    buffer = ByteBuffer.wrap(bytes);
+    psLen = buffer.get(bytes.length - 1) & 0xff;
+    in = new FSDataInputStream(new SeekableByteArrayInputStream(bytes));
+  }
+
+  private byte[] composeContent(String headerStr, String footerStr) throws CharacterCodingException {
+    ByteBuffer header = Text.encode(headerStr);
+    ByteBuffer footer = Text.encode(footerStr);
+    int headerLen = header.remaining();
+    int footerLen = footer.remaining() + 1;
+
+    ByteBuffer buf = ByteBuffer.allocate(headerLen + footerLen);
+
+    buf.put(header);
+    buf.put(footer);
+    buf.put((byte) footerLen);
+    return buf.array();
+  }
+
+  private static final class SeekableByteArrayInputStream extends ByteArrayInputStream
+          implements Seekable, PositionedReadable {
+
+    public SeekableByteArrayInputStream(byte[] buf) {
+      super(buf);
+    }
+
+    @Override
+    public void seek(long pos) throws IOException {
+      this.reset();
+      this.skip(pos);
+    }
+
+    @Override
+    public long getPos() throws IOException {
+      return pos;
+    }
+
+    @Override
+    public boolean seekToNewSource(long targetPos) throws IOException {
+      return false;
+    }
+
+    @Override
+    public int read(long position, byte[] buffer, int offset, int length)
+            throws IOException {
+      long oldPos = getPos();
+      int nread = -1;
+      try {
+        seek(position);
+        nread = read(buffer, offset, length);
+      } finally {
+        seek(oldPos);
+      }
+      return nread;
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer, int offset, int length)
+            throws IOException {
+      int nread = 0;
+      while (nread < length) {
+        int nbytes = read(position + nread, buffer, offset + nread, length - nread);
+        if (nbytes < 0) {
+          throw new EOFException("End of file reached before reading fully.");
+        }
+        nread += nbytes;
+      }
+    }
+
+    @Override
+    public void readFully(long position, byte[] buffer)
+            throws IOException {
+      readFully(position, buffer, 0, buffer.length);
+    }
+  }
+}


[13/51] [abbrv] hive git commit: HIVE-13211 : normalize Hive.get overloads to go thru one path (Sergey Shelukhin, reviewed by Ashutosh Chauhan)

Posted by jd...@apache.org.
HIVE-13211 : normalize Hive.get overloads to go thru one path (Sergey Shelukhin, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5bf324ea
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5bf324ea
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5bf324ea

Branch: refs/heads/llap
Commit: 5bf324ea5eaf308233a2af6149e8fb01bee0e4c6
Parents: 3931d4d
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Mar 9 11:03:52 2016 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed Mar 9 11:03:52 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/metadata/Hive.java    | 92 +++++++++++---------
 .../hive/ql/parse/BaseSemanticAnalyzer.java     |  2 +-
 2 files changed, 52 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5bf324ea/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index fdc7956..80208c2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -248,7 +248,7 @@ public class Hive {
    *
    */
   public static Hive get(HiveConf c) throws HiveException {
-    return getInternal(c, false);
+    return getInternal(c, false, false, true);
   }
 
   /**
@@ -256,24 +256,67 @@ public class Hive {
    * MS client, assuming the relevant settings would be unchanged within the same conf object.
    */
   public static Hive getWithFastCheck(HiveConf c) throws HiveException {
-    return getInternal(c, true);
+    return getWithFastCheck(c, true);
   }
 
-  private static Hive getInternal(HiveConf c, boolean isFastCheck) throws HiveException {
+  /**
+   * Same as {@link #get(HiveConf)}, except that it checks only the object identity of existing
+   * MS client, assuming the relevant settings would be unchanged within the same conf object.
+   */
+  public static Hive getWithFastCheck(HiveConf c, boolean doRegisterAllFns) throws HiveException {
+    return getInternal(c, false, true, doRegisterAllFns);
+  }
+
+  private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFastCheck,
+      boolean doRegisterAllFns) throws HiveException {
     Hive db = hiveDB.get();
-    if (db == null || !db.isCurrentUserOwner() ||
-        (db.metaStoreClient != null && !isCompatible(db, c, isFastCheck))) {
-      return get(c, true);
+    if (db == null || !db.isCurrentUserOwner() || needsRefresh
+        || (c != null && db.metaStoreClient != null && !isCompatible(db, c, isFastCheck))) {
+      return create(c, false, db, doRegisterAllFns);
+    }
+    if (c != null) {
+      db.conf = c;
     }
-    db.conf = c;
     return db;
   }
 
+  private static Hive create(HiveConf c, boolean needsRefresh, Hive db, boolean doRegisterAllFns)
+      throws HiveException {
+    if (db != null) {
+      LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh +
+        ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
+      db.close();
+    }
+    closeCurrent();
+    if (c == null) {
+      c = createHiveConf();
+    }
+    c.set("fs.scheme.class", "dfs");
+    Hive newdb = new Hive(c, doRegisterAllFns);
+    hiveDB.set(newdb);
+    return newdb;
+  }
+
+
+  private static HiveConf createHiveConf() {
+    SessionState session = SessionState.get();
+    return (session == null) ? new HiveConf(Hive.class) : session.getConf();
+  }
+
   private static boolean isCompatible(Hive db, HiveConf c, boolean isFastCheck) {
     return isFastCheck
         ? db.metaStoreClient.isSameConfObj(c) : db.metaStoreClient.isCompatibleWith(c);
   }
 
+
+  public static Hive get() throws HiveException {
+    return get(true);
+  }
+
+  public static Hive get(boolean doRegisterAllFns) throws HiveException {
+    return getInternal(null, false, false, doRegisterAllFns);
+  }
+
   /**
    * get a connection to metastore. see get(HiveConf) function for comments
    *
@@ -285,40 +328,7 @@ public class Hive {
    * @throws HiveException
    */
   public static Hive get(HiveConf c, boolean needsRefresh) throws HiveException {
-    Hive db = hiveDB.get();
-    if (db == null || needsRefresh || !db.isCurrentUserOwner()) {
-      if (db != null) {
-        LOG.debug("Creating new db. db = " + db + ", needsRefresh = " + needsRefresh +
-          ", db.isCurrentUserOwner = " + db.isCurrentUserOwner());
-      }
-      closeCurrent();
-      c.set("fs.scheme.class", "dfs");
-      Hive newdb = new Hive(c, true);
-      hiveDB.set(newdb);
-      return newdb;
-    }
-    db.conf = c;
-    return db;
-  }
-
-  public static Hive get() throws HiveException {
-    return get(true);
-  }
-
-  public static Hive get(boolean doRegisterAllFns) throws HiveException {
-    Hive db = hiveDB.get();
-    if (db != null && !db.isCurrentUserOwner()) {
-      LOG.debug("Creating new db. db.isCurrentUserOwner = " + db.isCurrentUserOwner());
-      db.close();
-      db = null;
-    }
-    if (db == null) {
-      SessionState session = SessionState.get();
-      HiveConf conf = session == null ? new HiveConf(Hive.class) : session.getConf();
-      db = new Hive(conf, doRegisterAllFns);
-      hiveDB.set(db);
-    }
-    return db;
+    return getInternal(c, needsRefresh, false, true);
   }
 
   public static void set(Hive hive) {

http://git-wip-us.apache.org/repos/asf/hive/blob/5bf324ea/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index b36a9a0..f6ba521 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -200,7 +200,7 @@ public abstract class BaseSemanticAnalyzer {
   }
 
   public BaseSemanticAnalyzer(HiveConf conf) throws SemanticException {
-   this(conf, createHiveDB(conf));
+    this(conf, createHiveDB(conf));
   }
 
   public BaseSemanticAnalyzer(HiveConf conf, Hive db) throws SemanticException {
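
The net effect of this commit is that get(HiveConf), get(HiveConf, boolean), get(), get(boolean), and the getWithFastCheck variants all funnel into a single getInternal, which makes one decision about reusing or rebuilding the thread-local Hive handle. A toy sketch of that consolidation, with a hypothetical Handle class standing in for the real Hive object:

public final class HandleCache {
  static final class Handle {
    final String config;
    Handle(String config) { this.config = config; }
  }

  private static final ThreadLocal<Handle> CACHE = new ThreadLocal<>();

  public static Handle get(String config) {
    return getInternal(config, false);
  }

  public static Handle get(String config, boolean needsRefresh) {
    return getInternal(config, needsRefresh);
  }

  public static Handle get() {
    return getInternal(null, false);
  }

  // Single decision point: rebuild only on an explicit refresh or an
  // incompatible config; a null config means "keep whatever is cached".
  private static Handle getInternal(String config, boolean needsRefresh) {
    Handle cached = CACHE.get();
    if (cached == null || needsRefresh
        || (config != null && !config.equals(cached.config))) {
      Handle fresh = new Handle(config == null ? "default" : config);
      CACHE.set(fresh);
      return fresh;
    }
    return cached;
  }

  private HandleCache() {}
}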


[36/51] [abbrv] hive git commit: HIVE-12481: Occasionally "Request is a replay" will be thrown from HS2 (Reviewed by Yongzhi Chen)

Posted by jd...@apache.org.
HIVE-12481: Occasionally "Request is a replay" will be thrown from HS2 (Reviewed by Yongzhi Chen)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e7a17566
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e7a17566
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e7a17566

Branch: refs/heads/llap
Commit: e7a17566314595578ae85a578d8abace4102b6d7
Parents: d4c1fdc
Author: Aihua Xu <ai...@apache.org>
Authored: Wed Mar 2 17:28:26 2016 -0500
Committer: Aihua Xu <ai...@apache.org>
Committed: Mon Mar 14 16:19:50 2016 -0400

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/HiveConnection.java     | 19 +++++++++++++++++--
 jdbc/src/java/org/apache/hive/jdbc/Utils.java    |  4 ++++
 2 files changed, 21 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e7a17566/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
index 873f421..352744f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java
@@ -63,6 +63,7 @@ import javax.net.ssl.SSLContext;
 import javax.net.ssl.TrustManagerFactory;
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
+
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
@@ -176,6 +177,13 @@ public class HiveConnection implements java.sql.Connection {
   }
 
   private void openTransport() throws SQLException {
+    int numRetries = 0;
+    int maxRetries = 1;
+    try {
+      maxRetries = Integer.parseInt(sessConfMap.get(JdbcConnectionParams.RETRIES));
+    } catch (NumberFormatException e) { // no or invalid "retries" value; keep the one-attempt default
+    }
+
     while (true) {
       try {
         assumeSubject =
@@ -208,8 +216,15 @@ public class HiveConnection implements java.sql.Connection {
         } else {
           LOG.info("Transport Used for JDBC connection: " +
             sessConfMap.get(JdbcConnectionParams.TRANSPORT_MODE));
-          throw new SQLException("Could not open client transport with JDBC Uri: " + jdbcUriString
-              + ": " + e.getMessage(), " 08S01", e);
+
+          // Retry maxRetries times
+          String errMsg = "Could not open client transport with JDBC Uri: " +
+              jdbcUriString + ": " + e.getMessage();
+          if (++numRetries >= maxRetries) {
+            throw new SQLException(errMsg, " 08S01", e);
+          } else {
+            LOG.warn(errMsg + " Retrying " + numRetries + " of " + maxRetries);
+          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/e7a17566/jdbc/src/java/org/apache/hive/jdbc/Utils.java
----------------------------------------------------------------------
diff --git a/jdbc/src/java/org/apache/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
index 080e8fc..754f89f 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/Utils.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/Utils.java
@@ -72,6 +72,10 @@ class Utils {
     // client side params are specified in sess_var_list
 
     // Client param names:
+
+    // Retry setting
+    static final String RETRIES = "retries";
+
     static final String AUTH_TYPE = "auth";
     // We're deprecating this variable's name.
     static final String AUTH_QOP_DEPRECATED = "sasl.qop";
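
With the new "retries" session variable, the JDBC driver makes up to that many connection attempts in openTransport before surfacing the SQLException, which papers over transient failures such as the Kerberos "Request is a replay" error. A hedged usage sketch -- the host, port, and credentials are placeholders, and the URL assumes the usual jdbc:hive2://host:port/db;var=value session-variable syntax:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class RetryingConnect {
  public static void main(String[] args) throws SQLException {
    // "retries" rides in the session-variable list and is parsed by
    // openTransport; an unparsable value falls back to one attempt.
    String url = "jdbc:hive2://hs2.example.com:10000/default;retries=3";
    try (Connection conn = DriverManager.getConnection(url, "user", "")) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}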


[48/51] [abbrv] hive git commit: HIVE-13260: ReduceSinkDeDuplication throws exception when pRS key is empty (Pengcheng Xiong, reviewed by Prasanth Jayachandran)

Posted by jd...@apache.org.
HIVE-13260: ReduceSinkDeDuplication throws exception when pRS key is empty (Pengcheng Xiong, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/06b604a0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/06b604a0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/06b604a0

Branch: refs/heads/llap
Commit: 06b604a03ba1c137c771c4f2dcbcd79249ffd141
Parents: 868db42
Author: Pengcheng Xiong <px...@apache.org>
Authored: Wed Mar 16 10:07:44 2016 -0700
Committer: Pengcheng Xiong <px...@apache.org>
Committed: Wed Mar 16 10:07:44 2016 -0700

----------------------------------------------------------------------
 .../correlation/ReduceSinkDeDuplication.java    |  18 +-
 .../reduceSinkDeDuplication_pRS_key_empty.q     |  60 +++++
 .../reduceSinkDeDuplication_pRS_key_empty.q.out | 220 +++++++++++++++++++
 3 files changed, 288 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/06b604a0/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
index 59c87a3..733620b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
@@ -312,17 +312,15 @@ public class ReduceSinkDeDuplication extends Transform {
       if (result[4] > 0) {
         // This case happens only when pRS key is empty in which case we can use
         // number of distribution keys and key serialization info from cRS
-        pRS.getConf().setNumDistributionKeys(cRS.getConf().getNumDistributionKeys());
-        List<FieldSchema> fields = PlanUtils.getFieldSchemasFromColumnList(pRS.getConf()
-            .getKeyCols(), "reducesinkkey");
-        TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(fields, pRS.getConf().getOrder(),
-                pRS.getConf().getNullOrder());
-        ArrayList<String> outputKeyCols = Lists.newArrayList();
-        for (int i = 0; i < fields.size(); i++) {
-          outputKeyCols.add(fields.get(i).getName());
+        if (pRS.getConf().getKeyCols() != null && pRS.getConf().getKeyCols().size() == 0
+            && cRS.getConf().getKeyCols() != null && cRS.getConf().getKeyCols().size() == 0) {
+          // As setNumDistributionKeys is a subset of keyCols, the size should
+          // be 0 too. This condition may be too strict; we may extend it in
+          // the future.
+          TableDesc keyTable = PlanUtils.getReduceKeyTableDesc(new ArrayList<FieldSchema>(), pRS
+              .getConf().getOrder(), pRS.getConf().getNullOrder());
+          pRS.getConf().setKeySerializeInfo(keyTable);
         }
-        pRS.getConf().setOutputKeyColumnNames(outputKeyCols);
-        pRS.getConf().setKeySerializeInfo(keyTable);
       }
       return true;
     }
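
The rewrite above narrows the optimization: instead of unconditionally copying the child ReduceSink's distribution keys and key serialization onto the parent, it rewrites the parent's key serialization only when both operators provably have empty key columns; the new q file below reproduces the empty-key case end to end. A minimal sketch of the guard, with hypothetical key-column lists standing in for the operator descriptors:

import java.util.Collections;
import java.util.List;

public class DedupGuardSketch {
  // Safe only in the narrow case: both parent and child reduce sinks
  // have empty (but non-null) key column lists.
  static boolean canReuseEmptyKeySerialization(List<String> parentKeys,
                                               List<String> childKeys) {
    return parentKeys != null && parentKeys.isEmpty()
        && childKeys != null && childKeys.isEmpty();
  }

  public static void main(String[] args) {
    System.out.println(canReuseEmptyKeySerialization(
        Collections.<String>emptyList(), Collections.<String>emptyList())); // true
    System.out.println(canReuseEmptyKeySerialization(
        Collections.singletonList("key"), Collections.<String>emptyList())); // false
  }
}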

http://git-wip-us.apache.org/repos/asf/hive/blob/06b604a0/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q b/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q
new file mode 100644
index 0000000..8bbae39
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q
@@ -0,0 +1,60 @@
+set hive.mapred.mode=nonstrict;
+set hive.cbo.enable=false;
+
+set hive.map.aggr=false;
+
+set hive.groupby.skewindata=false;
+set mapred.reduce.tasks=31;
+
+
+select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq;
+
+explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq;
+
+select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq;
+ 
+set hive.optimize.reducededuplication=false;
+
+explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq;
+
+select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq;

http://git-wip-us.apache.org/repos/asf/hive/blob/06b604a0/ql/src/test/results/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q.out b/ql/src/test/results/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q.out
new file mode 100644
index 0000000..4a848f2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q.out
@@ -0,0 +1,220 @@
+PREHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"columntype":"Double","min":260.182,"max":260.182,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{0}{0}{0}{1}{1}{1}{0}{0}{0}{0}{0}{1}{2}{1}{0}"}	{"columntype":"String","maxlength":2,"avglength":2.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{2}{0}{3}{6}{3}{0}{1}{1}{0}{0}{0}{0}{0}{0}{0}"}	{"columntype":"Double","min":20428.07287599998,"max":20428.07287599998,"countnulls":0,"numdistinctvalues":2,"ndvbitvector":"{0}{0}{3}{0}{1}{1}{0}{0}{0}{0}{0}{0}{0}{4}{2}{0}"}	{"columntype":"Double","min":20469.01089779557,"max":20469.01089779557,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{0}{1}{3}{2}{3}{5}{2}{0}{1}{0}{1}{1}{1}{1}{0}{1}"}
+PREHOOK: query: explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: value (type: string)
+              outputColumnNames: value
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: substr(value, 5) (type: string)
+                sort order: +
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0)
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+          Group By Operator
+            aggregations: compute_stats(_col0, 16), compute_stats(_col1, 16), compute_stats(_col2, 16), compute_stats(_col3, 16)
+            mode: complete
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"columntype":"Double","min":256.10355987055016,"max":256.10355987055016,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{2}{1}{0}{2}{0}{1}{1}{1}{0}{0}{1}{1}{0}{2}{1}{0}"}	{"columntype":"String","maxlength":2,"avglength":2.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{2}{0}{3}{6}{3}{0}{1}{1}{0}{0}{0}{0}{0}{0}{0}"}	{"columntype":"Double","min":20428.07287599999,"max":20428.07287599999,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{4}{0}{0}{4}{3}{0}{1}{0}{0}{0}{0}{0}{0}{1}{2}"}	{"columntype":"Double","min":20469.010897795582,"max":20469.010897795582,"countnulls":0,"numdistinctvalues":2,"ndvbitvector":"{2}{0}{2}{2}{0}{0}{2}{0}{0}{0}{0}{0}{1}{0}{0}{0}"}
+PREHOOK: query: explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: value (type: string)
+              outputColumnNames: value
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: substr(value, 5) (type: string)
+                sort order: +
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0)
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: double), _col1 (type: string), _col2 (type: double), _col3 (type: double)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: compute_stats(VALUE._col0, 16), compute_stats(VALUE._col2, 16), compute_stats(VALUE._col3, 16), compute_stats(VALUE._col4, 16)
+          mode: complete
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 108 Basic stats: COMPLETE Column stats: NONE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+PREHOOK: type: QUERY
+PREHOOK: Input: default@src
+#### A masked pattern was here ####
+POSTHOOK: query: select compute_stats(a,16),compute_stats(b,16),compute_stats(c,16),compute_stats(d,16)
+from
+(
+select
+  avg(DISTINCT substr(src.value,5)) as a,
+  max(substr(src.value,5)) as b,
+  variance(substr(src.value,5)) as c,
+  var_samp(substr(src.value,5)) as d
+ from src)subq
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+{"columntype":"Double","min":256.10355987055016,"max":256.10355987055016,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{2}{1}{0}{2}{0}{1}{1}{1}{0}{0}{1}{1}{0}{2}{1}{0}"}	{"columntype":"String","maxlength":2,"avglength":2.0,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{2}{0}{3}{6}{3}{0}{1}{1}{0}{0}{0}{0}{0}{0}{0}"}	{"columntype":"Double","min":20428.07287599999,"max":20428.07287599999,"countnulls":0,"numdistinctvalues":1,"ndvbitvector":"{1}{4}{0}{0}{4}{3}{0}{1}{0}{0}{0}{0}{0}{0}{1}{2}"}	{"columntype":"Double","min":20469.010897795582,"max":20469.010897795582,"countnulls":0,"numdistinctvalues":2,"ndvbitvector":"{2}{0}{2}{2}{0}{0}{2}{0}{0}{0}{0}{0}{1}{0}{0}{0}"}


[27/51] [abbrv] hive git commit: HIVE-13236 : LLAP: token renewal interval needs to be set (Sergey Shelukhin, reviewed by Siddharth Seth)

Posted by jd...@apache.org.
HIVE-13236 : LLAP: token renewal interval needs to be set (Sergey Shelukhin, reviewed by Siddharth Seth)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/62bae5e1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/62bae5e1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/62bae5e1

Branch: refs/heads/llap
Commit: 62bae5e1a5cc563c5ef3f650927f2a63038c5a50
Parents: b6023c7
Author: Sergey Shelukhin <se...@apache.org>
Authored: Thu Mar 10 19:35:55 2016 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Thu Mar 10 19:35:55 2016 -0800

----------------------------------------------------------------------
 .../hive/llap/security/SecretManager.java       | 39 +++++++++++++++++++-
 1 file changed, 37 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/62bae5e1/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java b/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
index dc4e81a..bbdca7b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/security/SecretManager.java
@@ -27,10 +27,43 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecretManager;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class SecretManager extends ZKDelegationTokenSecretManager<LlapTokenIdentifier> {
+  private static final Logger LOG = LoggerFactory.getLogger(SecretManager.class);
   public SecretManager(Configuration conf) {
     super(conf);
+    checkForZKDTSMBug(conf);
+  }
+
+  // Workaround for HADOOP-12659 - remove when Hadoop 2.7.X is no longer supported.
+  private void checkForZKDTSMBug(Configuration conf) {
+    // There's a bug in ZKDelegationTokenSecretManager ctor where seconds are not converted to ms.
+    long expectedRenewTimeSec = conf.getLong(DelegationTokenManager.RENEW_INTERVAL, -1);
+    LOG.info("Checking for tokenRenewInterval bug: " + expectedRenewTimeSec);
+    if (expectedRenewTimeSec == -1) return; // The default works, no bug.
+    java.lang.reflect.Field f = null;
+    try {
+     Class<?> c = org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.class;
+     f = c.getDeclaredField("tokenRenewInterval");
+     f.setAccessible(true);
+    } catch (Throwable t) {
+      // Maybe someone removed the field; probably ok to ignore.
+      LOG.error("Failed to check for tokenRenewInterval bug, hoping for the best", t);
+      return;
+    }
+    try {
+      long realValue = f.getLong(this);
+      long expectedValue = expectedRenewTimeSec * 1000;
+      LOG.info("tokenRenewInterval is: " + realValue + " (expected " + expectedValue + ")");
+      if (realValue == expectedRenewTimeSec) {
+        // Bug - the field has to be in ms, not sec. Override only if set precisely to sec.
+        f.setLong(this, expectedValue);
+      }
+    } catch (Exception ex) {
+      throw new RuntimeException("Failed to address tokenRenewInterval bug", ex);
+    }
   }
 
   @Override
@@ -62,8 +95,10 @@ public class SecretManager extends ZKDelegationTokenSecretManager<LlapTokenIdent
     // Override the default delegation token lifetime for LLAP.
     // Also set all the necessary ZK settings to defaults and LLAP configs, if not set.
     final Configuration zkConf = new Configuration(conf);
-    zkConf.setLong(DelegationTokenManager.MAX_LIFETIME,
-        HiveConf.getTimeVar(conf, ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME, TimeUnit.SECONDS));
+    long tokenLifetime = HiveConf.getTimeVar(
+        conf, ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME, TimeUnit.SECONDS);
+    zkConf.setLong(DelegationTokenManager.MAX_LIFETIME, tokenLifetime);
+    zkConf.setLong(DelegationTokenManager.RENEW_INTERVAL, tokenLifetime);
     zkConf.set(SecretManager.ZK_DTSM_ZK_KERBEROS_PRINCIPAL, principal);
     zkConf.set(SecretManager.ZK_DTSM_ZK_KERBEROS_KEYTAB, keyTab);
     setZkConfIfNotSet(zkConf, SecretManager.ZK_DTSM_ZNODE_WORKING_PATH, "llapzkdtsm");
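
The workaround reads a private superclass field reflectively and rewrites it only when it still holds the unconverted seconds value, so a fixed Hadoop is left untouched. The same pattern in isolation (the Base and Fixup classes are illustrative, not Hadoop's):

import java.lang.reflect.Field;

public class ReflectiveFixupSketch {
  static class Base {
    private long tokenRenewInterval = 60; // the bug: stored in seconds
  }

  static class Fixup extends Base {
    // Convert the inherited field to milliseconds, but only if it was
    // left in seconds -- mirroring the HADOOP-12659 workaround above.
    void fixRenewInterval(long expectedSec) throws Exception {
      Field f = Base.class.getDeclaredField("tokenRenewInterval");
      f.setAccessible(true);
      if (f.getLong(this) == expectedSec) {
        f.setLong(this, expectedSec * 1000);
      }
      System.out.println("tokenRenewInterval = " + f.getLong(this));
    }
  }

  public static void main(String[] args) throws Exception {
    new Fixup().fixRenewInterval(60); // prints 60000
  }
}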


[05/51] [abbrv] hive git commit: HIVE-12270: Add DBTokenStore support to HS2 delegation token (Chaoyu Tang, reviewed by Szehon Ho)

Posted by jd...@apache.org.
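
The generated C++ header below grows eight new token and master-key endpoints (add_token, remove_token, get_token, get_all_token_identifiers, add_master_key, update_master_key, remove_master_key, get_master_keys). As a sketch of exercising them from the matching Java bindings generated from the same Thrift IDL -- the host and identifiers are placeholders, and a real deployment would negotiate SASL rather than open a bare socket:

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class TokenEndpointsSketch {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("metastore.example.com", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // The new endpoints mirror the declarations added in this commit.
    boolean added = client.add_token("ident-1", "opaque-token-string");
    String fetched = client.get_token("ident-1");
    System.out.println("added=" + added + ", token=" + fetched);
    for (String id : client.get_all_token_identifiers()) {
      System.out.println("identifier: " + id);
    }
    client.remove_token("ident-1");
    transport.close();
  }
}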
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 078c1cd..8a8f8b1 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -133,6 +133,14 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) = 0;
   virtual int64_t renew_delegation_token(const std::string& token_str_form) = 0;
   virtual void cancel_delegation_token(const std::string& token_str_form) = 0;
+  virtual bool add_token(const std::string& token_identifier, const std::string& delegation_token) = 0;
+  virtual bool remove_token(const std::string& token_identifier) = 0;
+  virtual void get_token(std::string& _return, const std::string& token_identifier) = 0;
+  virtual void get_all_token_identifiers(std::vector<std::string> & _return) = 0;
+  virtual int32_t add_master_key(const std::string& key) = 0;
+  virtual void update_master_key(const int32_t seq_number, const std::string& key) = 0;
+  virtual bool remove_master_key(const int32_t key_seq) = 0;
+  virtual void get_master_keys(std::vector<std::string> & _return) = 0;
   virtual void get_open_txns(GetOpenTxnsResponse& _return) = 0;
   virtual void get_open_txns_info(GetOpenTxnsInfoResponse& _return) = 0;
   virtual void open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) = 0;
@@ -543,6 +551,34 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void cancel_delegation_token(const std::string& /* token_str_form */) {
     return;
   }
+  bool add_token(const std::string& /* token_identifier */, const std::string& /* delegation_token */) {
+    bool _return = false;
+    return _return;
+  }
+  bool remove_token(const std::string& /* token_identifier */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_token(std::string& /* _return */, const std::string& /* token_identifier */) {
+    return;
+  }
+  void get_all_token_identifiers(std::vector<std::string> & /* _return */) {
+    return;
+  }
+  int32_t add_master_key(const std::string& /* key */) {
+    int32_t _return = 0;
+    return _return;
+  }
+  void update_master_key(const int32_t /* seq_number */, const std::string& /* key */) {
+    return;
+  }
+  bool remove_master_key(const int32_t /* key_seq */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_master_keys(std::vector<std::string> & /* _return */) {
+    return;
+  }
   void get_open_txns(GetOpenTxnsResponse& /* _return */) {
     return;
   }
@@ -15023,26 +15059,43 @@ class ThriftHiveMetastore_cancel_delegation_token_presult {
 
 };
 
+typedef struct _ThriftHiveMetastore_add_token_args__isset {
+  _ThriftHiveMetastore_add_token_args__isset() : token_identifier(false), delegation_token(false) {}
+  bool token_identifier :1;
+  bool delegation_token :1;
+} _ThriftHiveMetastore_add_token_args__isset;
 
-class ThriftHiveMetastore_get_open_txns_args {
+class ThriftHiveMetastore_add_token_args {
  public:
 
-  ThriftHiveMetastore_get_open_txns_args(const ThriftHiveMetastore_get_open_txns_args&);
-  ThriftHiveMetastore_get_open_txns_args& operator=(const ThriftHiveMetastore_get_open_txns_args&);
-  ThriftHiveMetastore_get_open_txns_args() {
+  ThriftHiveMetastore_add_token_args(const ThriftHiveMetastore_add_token_args&);
+  ThriftHiveMetastore_add_token_args& operator=(const ThriftHiveMetastore_add_token_args&);
+  ThriftHiveMetastore_add_token_args() : token_identifier(), delegation_token() {
   }
 
-  virtual ~ThriftHiveMetastore_get_open_txns_args() throw();
+  virtual ~ThriftHiveMetastore_add_token_args() throw();
+  std::string token_identifier;
+  std::string delegation_token;
 
-  bool operator == (const ThriftHiveMetastore_get_open_txns_args & /* rhs */) const
+  _ThriftHiveMetastore_add_token_args__isset __isset;
+
+  void __set_token_identifier(const std::string& val);
+
+  void __set_delegation_token(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_add_token_args & rhs) const
   {
+    if (!(token_identifier == rhs.token_identifier))
+      return false;
+    if (!(delegation_token == rhs.delegation_token))
+      return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_get_open_txns_args &rhs) const {
+  bool operator != (const ThriftHiveMetastore_add_token_args &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_get_open_txns_args & ) const;
+  bool operator < (const ThriftHiveMetastore_add_token_args & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -15050,91 +15103,104 @@ class ThriftHiveMetastore_get_open_txns_args {
 };
 
 
-class ThriftHiveMetastore_get_open_txns_pargs {
+class ThriftHiveMetastore_add_token_pargs {
  public:
 
 
-  virtual ~ThriftHiveMetastore_get_open_txns_pargs() throw();
+  virtual ~ThriftHiveMetastore_add_token_pargs() throw();
+  const std::string* token_identifier;
+  const std::string* delegation_token;
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_get_open_txns_result__isset {
-  _ThriftHiveMetastore_get_open_txns_result__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_add_token_result__isset {
+  _ThriftHiveMetastore_add_token_result__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_get_open_txns_result__isset;
+} _ThriftHiveMetastore_add_token_result__isset;
 
-class ThriftHiveMetastore_get_open_txns_result {
+class ThriftHiveMetastore_add_token_result {
  public:
 
-  ThriftHiveMetastore_get_open_txns_result(const ThriftHiveMetastore_get_open_txns_result&);
-  ThriftHiveMetastore_get_open_txns_result& operator=(const ThriftHiveMetastore_get_open_txns_result&);
-  ThriftHiveMetastore_get_open_txns_result() {
+  ThriftHiveMetastore_add_token_result(const ThriftHiveMetastore_add_token_result&);
+  ThriftHiveMetastore_add_token_result& operator=(const ThriftHiveMetastore_add_token_result&);
+  ThriftHiveMetastore_add_token_result() : success(0) {
   }
 
-  virtual ~ThriftHiveMetastore_get_open_txns_result() throw();
-  GetOpenTxnsResponse success;
+  virtual ~ThriftHiveMetastore_add_token_result() throw();
+  bool success;
 
-  _ThriftHiveMetastore_get_open_txns_result__isset __isset;
+  _ThriftHiveMetastore_add_token_result__isset __isset;
 
-  void __set_success(const GetOpenTxnsResponse& val);
+  void __set_success(const bool val);
 
-  bool operator == (const ThriftHiveMetastore_get_open_txns_result & rhs) const
+  bool operator == (const ThriftHiveMetastore_add_token_result & rhs) const
   {
     if (!(success == rhs.success))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_get_open_txns_result &rhs) const {
+  bool operator != (const ThriftHiveMetastore_add_token_result &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_get_open_txns_result & ) const;
+  bool operator < (const ThriftHiveMetastore_add_token_result & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_get_open_txns_presult__isset {
-  _ThriftHiveMetastore_get_open_txns_presult__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_add_token_presult__isset {
+  _ThriftHiveMetastore_add_token_presult__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_get_open_txns_presult__isset;
+} _ThriftHiveMetastore_add_token_presult__isset;
 
-class ThriftHiveMetastore_get_open_txns_presult {
+class ThriftHiveMetastore_add_token_presult {
  public:
 
 
-  virtual ~ThriftHiveMetastore_get_open_txns_presult() throw();
-  GetOpenTxnsResponse* success;
+  virtual ~ThriftHiveMetastore_add_token_presult() throw();
+  bool* success;
 
-  _ThriftHiveMetastore_get_open_txns_presult__isset __isset;
+  _ThriftHiveMetastore_add_token_presult__isset __isset;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 
 };
 
+typedef struct _ThriftHiveMetastore_remove_token_args__isset {
+  _ThriftHiveMetastore_remove_token_args__isset() : token_identifier(false) {}
+  bool token_identifier :1;
+} _ThriftHiveMetastore_remove_token_args__isset;
 
-class ThriftHiveMetastore_get_open_txns_info_args {
+class ThriftHiveMetastore_remove_token_args {
  public:
 
-  ThriftHiveMetastore_get_open_txns_info_args(const ThriftHiveMetastore_get_open_txns_info_args&);
-  ThriftHiveMetastore_get_open_txns_info_args& operator=(const ThriftHiveMetastore_get_open_txns_info_args&);
-  ThriftHiveMetastore_get_open_txns_info_args() {
+  ThriftHiveMetastore_remove_token_args(const ThriftHiveMetastore_remove_token_args&);
+  ThriftHiveMetastore_remove_token_args& operator=(const ThriftHiveMetastore_remove_token_args&);
+  ThriftHiveMetastore_remove_token_args() : token_identifier() {
   }
 
-  virtual ~ThriftHiveMetastore_get_open_txns_info_args() throw();
+  virtual ~ThriftHiveMetastore_remove_token_args() throw();
+  std::string token_identifier;
 
-  bool operator == (const ThriftHiveMetastore_get_open_txns_info_args & /* rhs */) const
+  _ThriftHiveMetastore_remove_token_args__isset __isset;
+
+  void __set_token_identifier(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_remove_token_args & rhs) const
   {
+    if (!(token_identifier == rhs.token_identifier))
+      return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_get_open_txns_info_args &rhs) const {
+  bool operator != (const ThriftHiveMetastore_remove_token_args &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_get_open_txns_info_args & ) const;
+  bool operator < (const ThriftHiveMetastore_remove_token_args & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -15142,102 +15208,103 @@ class ThriftHiveMetastore_get_open_txns_info_args {
 };
 
 
-class ThriftHiveMetastore_get_open_txns_info_pargs {
+class ThriftHiveMetastore_remove_token_pargs {
  public:
 
 
-  virtual ~ThriftHiveMetastore_get_open_txns_info_pargs() throw();
+  virtual ~ThriftHiveMetastore_remove_token_pargs() throw();
+  const std::string* token_identifier;
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_get_open_txns_info_result__isset {
-  _ThriftHiveMetastore_get_open_txns_info_result__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_remove_token_result__isset {
+  _ThriftHiveMetastore_remove_token_result__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_get_open_txns_info_result__isset;
+} _ThriftHiveMetastore_remove_token_result__isset;
 
-class ThriftHiveMetastore_get_open_txns_info_result {
+class ThriftHiveMetastore_remove_token_result {
  public:
 
-  ThriftHiveMetastore_get_open_txns_info_result(const ThriftHiveMetastore_get_open_txns_info_result&);
-  ThriftHiveMetastore_get_open_txns_info_result& operator=(const ThriftHiveMetastore_get_open_txns_info_result&);
-  ThriftHiveMetastore_get_open_txns_info_result() {
+  ThriftHiveMetastore_remove_token_result(const ThriftHiveMetastore_remove_token_result&);
+  ThriftHiveMetastore_remove_token_result& operator=(const ThriftHiveMetastore_remove_token_result&);
+  ThriftHiveMetastore_remove_token_result() : success(0) {
   }
 
-  virtual ~ThriftHiveMetastore_get_open_txns_info_result() throw();
-  GetOpenTxnsInfoResponse success;
+  virtual ~ThriftHiveMetastore_remove_token_result() throw();
+  bool success;
 
-  _ThriftHiveMetastore_get_open_txns_info_result__isset __isset;
+  _ThriftHiveMetastore_remove_token_result__isset __isset;
 
-  void __set_success(const GetOpenTxnsInfoResponse& val);
+  void __set_success(const bool val);
 
-  bool operator == (const ThriftHiveMetastore_get_open_txns_info_result & rhs) const
+  bool operator == (const ThriftHiveMetastore_remove_token_result & rhs) const
   {
     if (!(success == rhs.success))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_get_open_txns_info_result &rhs) const {
+  bool operator != (const ThriftHiveMetastore_remove_token_result &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_get_open_txns_info_result & ) const;
+  bool operator < (const ThriftHiveMetastore_remove_token_result & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_get_open_txns_info_presult__isset {
-  _ThriftHiveMetastore_get_open_txns_info_presult__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_remove_token_presult__isset {
+  _ThriftHiveMetastore_remove_token_presult__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_get_open_txns_info_presult__isset;
+} _ThriftHiveMetastore_remove_token_presult__isset;
 
-class ThriftHiveMetastore_get_open_txns_info_presult {
+class ThriftHiveMetastore_remove_token_presult {
  public:
 
 
-  virtual ~ThriftHiveMetastore_get_open_txns_info_presult() throw();
-  GetOpenTxnsInfoResponse* success;
+  virtual ~ThriftHiveMetastore_remove_token_presult() throw();
+  bool* success;
 
-  _ThriftHiveMetastore_get_open_txns_info_presult__isset __isset;
+  _ThriftHiveMetastore_remove_token_presult__isset __isset;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 
 };
 
-typedef struct _ThriftHiveMetastore_open_txns_args__isset {
-  _ThriftHiveMetastore_open_txns_args__isset() : rqst(false) {}
-  bool rqst :1;
-} _ThriftHiveMetastore_open_txns_args__isset;
+typedef struct _ThriftHiveMetastore_get_token_args__isset {
+  _ThriftHiveMetastore_get_token_args__isset() : token_identifier(false) {}
+  bool token_identifier :1;
+} _ThriftHiveMetastore_get_token_args__isset;
 
-class ThriftHiveMetastore_open_txns_args {
+class ThriftHiveMetastore_get_token_args {
  public:
 
-  ThriftHiveMetastore_open_txns_args(const ThriftHiveMetastore_open_txns_args&);
-  ThriftHiveMetastore_open_txns_args& operator=(const ThriftHiveMetastore_open_txns_args&);
-  ThriftHiveMetastore_open_txns_args() {
+  ThriftHiveMetastore_get_token_args(const ThriftHiveMetastore_get_token_args&);
+  ThriftHiveMetastore_get_token_args& operator=(const ThriftHiveMetastore_get_token_args&);
+  ThriftHiveMetastore_get_token_args() : token_identifier() {
   }
 
-  virtual ~ThriftHiveMetastore_open_txns_args() throw();
-  OpenTxnRequest rqst;
+  virtual ~ThriftHiveMetastore_get_token_args() throw();
+  std::string token_identifier;
 
-  _ThriftHiveMetastore_open_txns_args__isset __isset;
+  _ThriftHiveMetastore_get_token_args__isset __isset;
 
-  void __set_rqst(const OpenTxnRequest& val);
+  void __set_token_identifier(const std::string& val);
 
-  bool operator == (const ThriftHiveMetastore_open_txns_args & rhs) const
+  bool operator == (const ThriftHiveMetastore_get_token_args & rhs) const
   {
-    if (!(rqst == rhs.rqst))
+    if (!(token_identifier == rhs.token_identifier))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_open_txns_args &rhs) const {
+  bool operator != (const ThriftHiveMetastore_get_token_args &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_open_txns_args & ) const;
+  bool operator < (const ThriftHiveMetastore_get_token_args & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -15245,103 +15312,92 @@ class ThriftHiveMetastore_open_txns_args {
 };
 
 
-class ThriftHiveMetastore_open_txns_pargs {
+class ThriftHiveMetastore_get_token_pargs {
  public:
 
 
-  virtual ~ThriftHiveMetastore_open_txns_pargs() throw();
-  const OpenTxnRequest* rqst;
+  virtual ~ThriftHiveMetastore_get_token_pargs() throw();
+  const std::string* token_identifier;
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_open_txns_result__isset {
-  _ThriftHiveMetastore_open_txns_result__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_get_token_result__isset {
+  _ThriftHiveMetastore_get_token_result__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_open_txns_result__isset;
+} _ThriftHiveMetastore_get_token_result__isset;
 
-class ThriftHiveMetastore_open_txns_result {
+class ThriftHiveMetastore_get_token_result {
  public:
 
-  ThriftHiveMetastore_open_txns_result(const ThriftHiveMetastore_open_txns_result&);
-  ThriftHiveMetastore_open_txns_result& operator=(const ThriftHiveMetastore_open_txns_result&);
-  ThriftHiveMetastore_open_txns_result() {
+  ThriftHiveMetastore_get_token_result(const ThriftHiveMetastore_get_token_result&);
+  ThriftHiveMetastore_get_token_result& operator=(const ThriftHiveMetastore_get_token_result&);
+  ThriftHiveMetastore_get_token_result() : success() {
   }
 
-  virtual ~ThriftHiveMetastore_open_txns_result() throw();
-  OpenTxnsResponse success;
+  virtual ~ThriftHiveMetastore_get_token_result() throw();
+  std::string success;
 
-  _ThriftHiveMetastore_open_txns_result__isset __isset;
+  _ThriftHiveMetastore_get_token_result__isset __isset;
 
-  void __set_success(const OpenTxnsResponse& val);
+  void __set_success(const std::string& val);
 
-  bool operator == (const ThriftHiveMetastore_open_txns_result & rhs) const
+  bool operator == (const ThriftHiveMetastore_get_token_result & rhs) const
   {
     if (!(success == rhs.success))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_open_txns_result &rhs) const {
+  bool operator != (const ThriftHiveMetastore_get_token_result &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_open_txns_result & ) const;
+  bool operator < (const ThriftHiveMetastore_get_token_result & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_open_txns_presult__isset {
-  _ThriftHiveMetastore_open_txns_presult__isset() : success(false) {}
+typedef struct _ThriftHiveMetastore_get_token_presult__isset {
+  _ThriftHiveMetastore_get_token_presult__isset() : success(false) {}
   bool success :1;
-} _ThriftHiveMetastore_open_txns_presult__isset;
+} _ThriftHiveMetastore_get_token_presult__isset;
 
-class ThriftHiveMetastore_open_txns_presult {
+class ThriftHiveMetastore_get_token_presult {
  public:
 
 
-  virtual ~ThriftHiveMetastore_open_txns_presult() throw();
-  OpenTxnsResponse* success;
+  virtual ~ThriftHiveMetastore_get_token_presult() throw();
+  std::string* success;
 
-  _ThriftHiveMetastore_open_txns_presult__isset __isset;
+  _ThriftHiveMetastore_get_token_presult__isset __isset;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 
 };
 
-typedef struct _ThriftHiveMetastore_abort_txn_args__isset {
-  _ThriftHiveMetastore_abort_txn_args__isset() : rqst(false) {}
-  bool rqst :1;
-} _ThriftHiveMetastore_abort_txn_args__isset;
 
-class ThriftHiveMetastore_abort_txn_args {
+class ThriftHiveMetastore_get_all_token_identifiers_args {
  public:
 
-  ThriftHiveMetastore_abort_txn_args(const ThriftHiveMetastore_abort_txn_args&);
-  ThriftHiveMetastore_abort_txn_args& operator=(const ThriftHiveMetastore_abort_txn_args&);
-  ThriftHiveMetastore_abort_txn_args() {
+  ThriftHiveMetastore_get_all_token_identifiers_args(const ThriftHiveMetastore_get_all_token_identifiers_args&);
+  ThriftHiveMetastore_get_all_token_identifiers_args& operator=(const ThriftHiveMetastore_get_all_token_identifiers_args&);
+  ThriftHiveMetastore_get_all_token_identifiers_args() {
   }
 
-  virtual ~ThriftHiveMetastore_abort_txn_args() throw();
-  AbortTxnRequest rqst;
-
-  _ThriftHiveMetastore_abort_txn_args__isset __isset;
-
-  void __set_rqst(const AbortTxnRequest& val);
+  virtual ~ThriftHiveMetastore_get_all_token_identifiers_args() throw();
 
-  bool operator == (const ThriftHiveMetastore_abort_txn_args & rhs) const
+  bool operator == (const ThriftHiveMetastore_get_all_token_identifiers_args & /* rhs */) const
   {
-    if (!(rqst == rhs.rqst))
-      return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_abort_txn_args &rhs) const {
+  bool operator != (const ThriftHiveMetastore_get_all_token_identifiers_args &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_abort_txn_args & ) const;
+  bool operator < (const ThriftHiveMetastore_get_all_token_identifiers_args & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -15349,103 +15405,102 @@ class ThriftHiveMetastore_abort_txn_args {
 };
 
 
-class ThriftHiveMetastore_abort_txn_pargs {
+class ThriftHiveMetastore_get_all_token_identifiers_pargs {
  public:
 
 
-  virtual ~ThriftHiveMetastore_abort_txn_pargs() throw();
-  const AbortTxnRequest* rqst;
+  virtual ~ThriftHiveMetastore_get_all_token_identifiers_pargs() throw();
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_abort_txn_result__isset {
-  _ThriftHiveMetastore_abort_txn_result__isset() : o1(false) {}
-  bool o1 :1;
-} _ThriftHiveMetastore_abort_txn_result__isset;
+typedef struct _ThriftHiveMetastore_get_all_token_identifiers_result__isset {
+  _ThriftHiveMetastore_get_all_token_identifiers_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_all_token_identifiers_result__isset;
 
-class ThriftHiveMetastore_abort_txn_result {
+class ThriftHiveMetastore_get_all_token_identifiers_result {
  public:
 
-  ThriftHiveMetastore_abort_txn_result(const ThriftHiveMetastore_abort_txn_result&);
-  ThriftHiveMetastore_abort_txn_result& operator=(const ThriftHiveMetastore_abort_txn_result&);
-  ThriftHiveMetastore_abort_txn_result() {
+  ThriftHiveMetastore_get_all_token_identifiers_result(const ThriftHiveMetastore_get_all_token_identifiers_result&);
+  ThriftHiveMetastore_get_all_token_identifiers_result& operator=(const ThriftHiveMetastore_get_all_token_identifiers_result&);
+  ThriftHiveMetastore_get_all_token_identifiers_result() {
   }
 
-  virtual ~ThriftHiveMetastore_abort_txn_result() throw();
-  NoSuchTxnException o1;
+  virtual ~ThriftHiveMetastore_get_all_token_identifiers_result() throw();
+  std::vector<std::string>  success;
 
-  _ThriftHiveMetastore_abort_txn_result__isset __isset;
+  _ThriftHiveMetastore_get_all_token_identifiers_result__isset __isset;
 
-  void __set_o1(const NoSuchTxnException& val);
+  void __set_success(const std::vector<std::string> & val);
 
-  bool operator == (const ThriftHiveMetastore_abort_txn_result & rhs) const
+  bool operator == (const ThriftHiveMetastore_get_all_token_identifiers_result & rhs) const
   {
-    if (!(o1 == rhs.o1))
+    if (!(success == rhs.success))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_abort_txn_result &rhs) const {
+  bool operator != (const ThriftHiveMetastore_get_all_token_identifiers_result &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_abort_txn_result & ) const;
+  bool operator < (const ThriftHiveMetastore_get_all_token_identifiers_result & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_abort_txn_presult__isset {
-  _ThriftHiveMetastore_abort_txn_presult__isset() : o1(false) {}
-  bool o1 :1;
-} _ThriftHiveMetastore_abort_txn_presult__isset;
+typedef struct _ThriftHiveMetastore_get_all_token_identifiers_presult__isset {
+  _ThriftHiveMetastore_get_all_token_identifiers_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_all_token_identifiers_presult__isset;
 
-class ThriftHiveMetastore_abort_txn_presult {
+class ThriftHiveMetastore_get_all_token_identifiers_presult {
  public:
 
 
-  virtual ~ThriftHiveMetastore_abort_txn_presult() throw();
-  NoSuchTxnException o1;
+  virtual ~ThriftHiveMetastore_get_all_token_identifiers_presult() throw();
+  std::vector<std::string> * success;
 
-  _ThriftHiveMetastore_abort_txn_presult__isset __isset;
+  _ThriftHiveMetastore_get_all_token_identifiers_presult__isset __isset;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 
 };
 
-typedef struct _ThriftHiveMetastore_commit_txn_args__isset {
-  _ThriftHiveMetastore_commit_txn_args__isset() : rqst(false) {}
-  bool rqst :1;
-} _ThriftHiveMetastore_commit_txn_args__isset;
+typedef struct _ThriftHiveMetastore_add_master_key_args__isset {
+  _ThriftHiveMetastore_add_master_key_args__isset() : key(false) {}
+  bool key :1;
+} _ThriftHiveMetastore_add_master_key_args__isset;
 
-class ThriftHiveMetastore_commit_txn_args {
+class ThriftHiveMetastore_add_master_key_args {
  public:
 
-  ThriftHiveMetastore_commit_txn_args(const ThriftHiveMetastore_commit_txn_args&);
-  ThriftHiveMetastore_commit_txn_args& operator=(const ThriftHiveMetastore_commit_txn_args&);
-  ThriftHiveMetastore_commit_txn_args() {
+  ThriftHiveMetastore_add_master_key_args(const ThriftHiveMetastore_add_master_key_args&);
+  ThriftHiveMetastore_add_master_key_args& operator=(const ThriftHiveMetastore_add_master_key_args&);
+  ThriftHiveMetastore_add_master_key_args() : key() {
   }
 
-  virtual ~ThriftHiveMetastore_commit_txn_args() throw();
-  CommitTxnRequest rqst;
+  virtual ~ThriftHiveMetastore_add_master_key_args() throw();
+  std::string key;
 
-  _ThriftHiveMetastore_commit_txn_args__isset __isset;
+  _ThriftHiveMetastore_add_master_key_args__isset __isset;
 
-  void __set_rqst(const CommitTxnRequest& val);
+  void __set_key(const std::string& val);
 
-  bool operator == (const ThriftHiveMetastore_commit_txn_args & rhs) const
+  bool operator == (const ThriftHiveMetastore_add_master_key_args & rhs) const
   {
-    if (!(rqst == rhs.rqst))
+    if (!(key == rhs.key))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_commit_txn_args &rhs) const {
+  bool operator != (const ThriftHiveMetastore_add_master_key_args &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_commit_txn_args & ) const;
+  bool operator < (const ThriftHiveMetastore_add_master_key_args & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
@@ -15453,83 +15508,902 @@ class ThriftHiveMetastore_commit_txn_args {
 };
 
 
-class ThriftHiveMetastore_commit_txn_pargs {
+class ThriftHiveMetastore_add_master_key_pargs {
  public:
 
 
-  virtual ~ThriftHiveMetastore_commit_txn_pargs() throw();
-  const CommitTxnRequest* rqst;
+  virtual ~ThriftHiveMetastore_add_master_key_pargs() throw();
+  const std::string* key;
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_commit_txn_result__isset {
-  _ThriftHiveMetastore_commit_txn_result__isset() : o1(false), o2(false) {}
+typedef struct _ThriftHiveMetastore_add_master_key_result__isset {
+  _ThriftHiveMetastore_add_master_key_result__isset() : success(false), o1(false) {}
+  bool success :1;
   bool o1 :1;
-  bool o2 :1;
-} _ThriftHiveMetastore_commit_txn_result__isset;
+} _ThriftHiveMetastore_add_master_key_result__isset;
 
-class ThriftHiveMetastore_commit_txn_result {
+class ThriftHiveMetastore_add_master_key_result {
  public:
 
-  ThriftHiveMetastore_commit_txn_result(const ThriftHiveMetastore_commit_txn_result&);
-  ThriftHiveMetastore_commit_txn_result& operator=(const ThriftHiveMetastore_commit_txn_result&);
-  ThriftHiveMetastore_commit_txn_result() {
+  ThriftHiveMetastore_add_master_key_result(const ThriftHiveMetastore_add_master_key_result&);
+  ThriftHiveMetastore_add_master_key_result& operator=(const ThriftHiveMetastore_add_master_key_result&);
+  ThriftHiveMetastore_add_master_key_result() : success(0) {
   }
 
-  virtual ~ThriftHiveMetastore_commit_txn_result() throw();
-  NoSuchTxnException o1;
-  TxnAbortedException o2;
+  virtual ~ThriftHiveMetastore_add_master_key_result() throw();
+  int32_t success;
+  MetaException o1;
 
-  _ThriftHiveMetastore_commit_txn_result__isset __isset;
+  _ThriftHiveMetastore_add_master_key_result__isset __isset;
 
-  void __set_o1(const NoSuchTxnException& val);
+  void __set_success(const int32_t val);
 
-  void __set_o2(const TxnAbortedException& val);
+  void __set_o1(const MetaException& val);
 
-  bool operator == (const ThriftHiveMetastore_commit_txn_result & rhs) const
+  bool operator == (const ThriftHiveMetastore_add_master_key_result & rhs) const
   {
-    if (!(o1 == rhs.o1))
+    if (!(success == rhs.success))
       return false;
-    if (!(o2 == rhs.o2))
+    if (!(o1 == rhs.o1))
       return false;
     return true;
   }
-  bool operator != (const ThriftHiveMetastore_commit_txn_result &rhs) const {
+  bool operator != (const ThriftHiveMetastore_add_master_key_result &rhs) const {
     return !(*this == rhs);
   }
 
-  bool operator < (const ThriftHiveMetastore_commit_txn_result & ) const;
+  bool operator < (const ThriftHiveMetastore_add_master_key_result & ) const;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
 };
 
-typedef struct _ThriftHiveMetastore_commit_txn_presult__isset {
-  _ThriftHiveMetastore_commit_txn_presult__isset() : o1(false), o2(false) {}
+typedef struct _ThriftHiveMetastore_add_master_key_presult__isset {
+  _ThriftHiveMetastore_add_master_key_presult__isset() : success(false), o1(false) {}
+  bool success :1;
   bool o1 :1;
-  bool o2 :1;
-} _ThriftHiveMetastore_commit_txn_presult__isset;
+} _ThriftHiveMetastore_add_master_key_presult__isset;
 
-class ThriftHiveMetastore_commit_txn_presult {
+class ThriftHiveMetastore_add_master_key_presult {
  public:
 
 
-  virtual ~ThriftHiveMetastore_commit_txn_presult() throw();
-  NoSuchTxnException o1;
-  TxnAbortedException o2;
+  virtual ~ThriftHiveMetastore_add_master_key_presult() throw();
+  int32_t* success;
+  MetaException o1;
 
-  _ThriftHiveMetastore_commit_txn_presult__isset __isset;
+  _ThriftHiveMetastore_add_master_key_presult__isset __isset;
 
   uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
 
 };
 
-typedef struct _ThriftHiveMetastore_lock_args__isset {
-  _ThriftHiveMetastore_lock_args__isset() : rqst(false) {}
-  bool rqst :1;
+typedef struct _ThriftHiveMetastore_update_master_key_args__isset {
+  _ThriftHiveMetastore_update_master_key_args__isset() : seq_number(false), key(false) {}
+  bool seq_number :1;
+  bool key :1;
+} _ThriftHiveMetastore_update_master_key_args__isset;
+
+class ThriftHiveMetastore_update_master_key_args {
+ public:
+
+  ThriftHiveMetastore_update_master_key_args(const ThriftHiveMetastore_update_master_key_args&);
+  ThriftHiveMetastore_update_master_key_args& operator=(const ThriftHiveMetastore_update_master_key_args&);
+  ThriftHiveMetastore_update_master_key_args() : seq_number(0), key() {
+  }
+
+  virtual ~ThriftHiveMetastore_update_master_key_args() throw();
+  int32_t seq_number;
+  std::string key;
+
+  _ThriftHiveMetastore_update_master_key_args__isset __isset;
+
+  void __set_seq_number(const int32_t val);
+
+  void __set_key(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_update_master_key_args & rhs) const
+  {
+    if (!(seq_number == rhs.seq_number))
+      return false;
+    if (!(key == rhs.key))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_update_master_key_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_update_master_key_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_update_master_key_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_update_master_key_pargs() throw();
+  const int32_t* seq_number;
+  const std::string* key;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_master_key_result__isset {
+  _ThriftHiveMetastore_update_master_key_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_update_master_key_result__isset;
+
+class ThriftHiveMetastore_update_master_key_result {
+ public:
+
+  ThriftHiveMetastore_update_master_key_result(const ThriftHiveMetastore_update_master_key_result&);
+  ThriftHiveMetastore_update_master_key_result& operator=(const ThriftHiveMetastore_update_master_key_result&);
+  ThriftHiveMetastore_update_master_key_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_update_master_key_result() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_update_master_key_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_update_master_key_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_update_master_key_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_update_master_key_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_update_master_key_presult__isset {
+  _ThriftHiveMetastore_update_master_key_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_update_master_key_presult__isset;
+
+class ThriftHiveMetastore_update_master_key_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_update_master_key_presult() throw();
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_update_master_key_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_remove_master_key_args__isset {
+  _ThriftHiveMetastore_remove_master_key_args__isset() : key_seq(false) {}
+  bool key_seq :1;
+} _ThriftHiveMetastore_remove_master_key_args__isset;
+
+class ThriftHiveMetastore_remove_master_key_args {
+ public:
+
+  ThriftHiveMetastore_remove_master_key_args(const ThriftHiveMetastore_remove_master_key_args&);
+  ThriftHiveMetastore_remove_master_key_args& operator=(const ThriftHiveMetastore_remove_master_key_args&);
+  ThriftHiveMetastore_remove_master_key_args() : key_seq(0) {
+  }
+
+  virtual ~ThriftHiveMetastore_remove_master_key_args() throw();
+  int32_t key_seq;
+
+  _ThriftHiveMetastore_remove_master_key_args__isset __isset;
+
+  void __set_key_seq(const int32_t val);
+
+  bool operator == (const ThriftHiveMetastore_remove_master_key_args & rhs) const
+  {
+    if (!(key_seq == rhs.key_seq))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_remove_master_key_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_remove_master_key_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_remove_master_key_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_remove_master_key_pargs() throw();
+  const int32_t* key_seq;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_remove_master_key_result__isset {
+  _ThriftHiveMetastore_remove_master_key_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_remove_master_key_result__isset;
+
+class ThriftHiveMetastore_remove_master_key_result {
+ public:
+
+  ThriftHiveMetastore_remove_master_key_result(const ThriftHiveMetastore_remove_master_key_result&);
+  ThriftHiveMetastore_remove_master_key_result& operator=(const ThriftHiveMetastore_remove_master_key_result&);
+  ThriftHiveMetastore_remove_master_key_result() : success(0) {
+  }
+
+  virtual ~ThriftHiveMetastore_remove_master_key_result() throw();
+  bool success;
+
+  _ThriftHiveMetastore_remove_master_key_result__isset __isset;
+
+  void __set_success(const bool val);
+
+  bool operator == (const ThriftHiveMetastore_remove_master_key_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_remove_master_key_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_remove_master_key_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_remove_master_key_presult__isset {
+  _ThriftHiveMetastore_remove_master_key_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_remove_master_key_presult__isset;
+
+class ThriftHiveMetastore_remove_master_key_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_remove_master_key_presult() throw();
+  bool* success;
+
+  _ThriftHiveMetastore_remove_master_key_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class ThriftHiveMetastore_get_master_keys_args {
+ public:
+
+  ThriftHiveMetastore_get_master_keys_args(const ThriftHiveMetastore_get_master_keys_args&);
+  ThriftHiveMetastore_get_master_keys_args& operator=(const ThriftHiveMetastore_get_master_keys_args&);
+  ThriftHiveMetastore_get_master_keys_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_master_keys_args() throw();
+
+  bool operator == (const ThriftHiveMetastore_get_master_keys_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_master_keys_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_master_keys_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_master_keys_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_master_keys_pargs() throw();
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_master_keys_result__isset {
+  _ThriftHiveMetastore_get_master_keys_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_master_keys_result__isset;
+
+class ThriftHiveMetastore_get_master_keys_result {
+ public:
+
+  ThriftHiveMetastore_get_master_keys_result(const ThriftHiveMetastore_get_master_keys_result&);
+  ThriftHiveMetastore_get_master_keys_result& operator=(const ThriftHiveMetastore_get_master_keys_result&);
+  ThriftHiveMetastore_get_master_keys_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_master_keys_result() throw();
+  std::vector<std::string>  success;
+
+  _ThriftHiveMetastore_get_master_keys_result__isset __isset;
+
+  void __set_success(const std::vector<std::string> & val);
+
+  bool operator == (const ThriftHiveMetastore_get_master_keys_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_master_keys_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_master_keys_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_master_keys_presult__isset {
+  _ThriftHiveMetastore_get_master_keys_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_master_keys_presult__isset;
+
+class ThriftHiveMetastore_get_master_keys_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_master_keys_presult() throw();
+  std::vector<std::string> * success;
+
+  _ThriftHiveMetastore_get_master_keys_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class ThriftHiveMetastore_get_open_txns_args {
+ public:
+
+  ThriftHiveMetastore_get_open_txns_args(const ThriftHiveMetastore_get_open_txns_args&);
+  ThriftHiveMetastore_get_open_txns_args& operator=(const ThriftHiveMetastore_get_open_txns_args&);
+  ThriftHiveMetastore_get_open_txns_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_open_txns_args() throw();
+
+  bool operator == (const ThriftHiveMetastore_get_open_txns_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_open_txns_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_open_txns_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_open_txns_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_open_txns_pargs() throw();
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_open_txns_result__isset {
+  _ThriftHiveMetastore_get_open_txns_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_open_txns_result__isset;
+
+class ThriftHiveMetastore_get_open_txns_result {
+ public:
+
+  ThriftHiveMetastore_get_open_txns_result(const ThriftHiveMetastore_get_open_txns_result&);
+  ThriftHiveMetastore_get_open_txns_result& operator=(const ThriftHiveMetastore_get_open_txns_result&);
+  ThriftHiveMetastore_get_open_txns_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_open_txns_result() throw();
+  GetOpenTxnsResponse success;
+
+  _ThriftHiveMetastore_get_open_txns_result__isset __isset;
+
+  void __set_success(const GetOpenTxnsResponse& val);
+
+  bool operator == (const ThriftHiveMetastore_get_open_txns_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_open_txns_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_open_txns_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_open_txns_presult__isset {
+  _ThriftHiveMetastore_get_open_txns_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_open_txns_presult__isset;
+
+class ThriftHiveMetastore_get_open_txns_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_open_txns_presult() throw();
+  GetOpenTxnsResponse* success;
+
+  _ThriftHiveMetastore_get_open_txns_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class ThriftHiveMetastore_get_open_txns_info_args {
+ public:
+
+  ThriftHiveMetastore_get_open_txns_info_args(const ThriftHiveMetastore_get_open_txns_info_args&);
+  ThriftHiveMetastore_get_open_txns_info_args& operator=(const ThriftHiveMetastore_get_open_txns_info_args&);
+  ThriftHiveMetastore_get_open_txns_info_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_open_txns_info_args() throw();
+
+  bool operator == (const ThriftHiveMetastore_get_open_txns_info_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_open_txns_info_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_open_txns_info_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_open_txns_info_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_open_txns_info_pargs() throw();
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_open_txns_info_result__isset {
+  _ThriftHiveMetastore_get_open_txns_info_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_open_txns_info_result__isset;
+
+class ThriftHiveMetastore_get_open_txns_info_result {
+ public:
+
+  ThriftHiveMetastore_get_open_txns_info_result(const ThriftHiveMetastore_get_open_txns_info_result&);
+  ThriftHiveMetastore_get_open_txns_info_result& operator=(const ThriftHiveMetastore_get_open_txns_info_result&);
+  ThriftHiveMetastore_get_open_txns_info_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_open_txns_info_result() throw();
+  GetOpenTxnsInfoResponse success;
+
+  _ThriftHiveMetastore_get_open_txns_info_result__isset __isset;
+
+  void __set_success(const GetOpenTxnsInfoResponse& val);
+
+  bool operator == (const ThriftHiveMetastore_get_open_txns_info_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_open_txns_info_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_open_txns_info_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_open_txns_info_presult__isset {
+  _ThriftHiveMetastore_get_open_txns_info_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_get_open_txns_info_presult__isset;
+
+class ThriftHiveMetastore_get_open_txns_info_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_open_txns_info_presult() throw();
+  GetOpenTxnsInfoResponse* success;
+
+  _ThriftHiveMetastore_get_open_txns_info_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_open_txns_args__isset {
+  _ThriftHiveMetastore_open_txns_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_open_txns_args__isset;
+
+class ThriftHiveMetastore_open_txns_args {
+ public:
+
+  ThriftHiveMetastore_open_txns_args(const ThriftHiveMetastore_open_txns_args&);
+  ThriftHiveMetastore_open_txns_args& operator=(const ThriftHiveMetastore_open_txns_args&);
+  ThriftHiveMetastore_open_txns_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_open_txns_args() throw();
+  OpenTxnRequest rqst;
+
+  _ThriftHiveMetastore_open_txns_args__isset __isset;
+
+  void __set_rqst(const OpenTxnRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_open_txns_args & rhs) const
+  {
+    if (!(rqst == rhs.rqst))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_open_txns_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_open_txns_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_open_txns_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_open_txns_pargs() throw();
+  const OpenTxnRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_open_txns_result__isset {
+  _ThriftHiveMetastore_open_txns_result__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_open_txns_result__isset;
+
+class ThriftHiveMetastore_open_txns_result {
+ public:
+
+  ThriftHiveMetastore_open_txns_result(const ThriftHiveMetastore_open_txns_result&);
+  ThriftHiveMetastore_open_txns_result& operator=(const ThriftHiveMetastore_open_txns_result&);
+  ThriftHiveMetastore_open_txns_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_open_txns_result() throw();
+  OpenTxnsResponse success;
+
+  _ThriftHiveMetastore_open_txns_result__isset __isset;
+
+  void __set_success(const OpenTxnsResponse& val);
+
+  bool operator == (const ThriftHiveMetastore_open_txns_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_open_txns_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_open_txns_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_open_txns_presult__isset {
+  _ThriftHiveMetastore_open_txns_presult__isset() : success(false) {}
+  bool success :1;
+} _ThriftHiveMetastore_open_txns_presult__isset;
+
+class ThriftHiveMetastore_open_txns_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_open_txns_presult() throw();
+  OpenTxnsResponse* success;
+
+  _ThriftHiveMetastore_open_txns_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txn_args__isset {
+  _ThriftHiveMetastore_abort_txn_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_abort_txn_args__isset;
+
+class ThriftHiveMetastore_abort_txn_args {
+ public:
+
+  ThriftHiveMetastore_abort_txn_args(const ThriftHiveMetastore_abort_txn_args&);
+  ThriftHiveMetastore_abort_txn_args& operator=(const ThriftHiveMetastore_abort_txn_args&);
+  ThriftHiveMetastore_abort_txn_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txn_args() throw();
+  AbortTxnRequest rqst;
+
+  _ThriftHiveMetastore_abort_txn_args__isset __isset;
+
+  void __set_rqst(const AbortTxnRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txn_args & rhs) const
+  {
+    if (!(rqst == rhs.rqst))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txn_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txn_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_abort_txn_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txn_pargs() throw();
+  const AbortTxnRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txn_result__isset {
+  _ThriftHiveMetastore_abort_txn_result__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txn_result__isset;
+
+class ThriftHiveMetastore_abort_txn_result {
+ public:
+
+  ThriftHiveMetastore_abort_txn_result(const ThriftHiveMetastore_abort_txn_result&);
+  ThriftHiveMetastore_abort_txn_result& operator=(const ThriftHiveMetastore_abort_txn_result&);
+  ThriftHiveMetastore_abort_txn_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_abort_txn_result() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txn_result__isset __isset;
+
+  void __set_o1(const NoSuchTxnException& val);
+
+  bool operator == (const ThriftHiveMetastore_abort_txn_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_abort_txn_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_abort_txn_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_abort_txn_presult__isset {
+  _ThriftHiveMetastore_abort_txn_presult__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_abort_txn_presult__isset;
+
+class ThriftHiveMetastore_abort_txn_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_abort_txn_presult() throw();
+  NoSuchTxnException o1;
+
+  _ThriftHiveMetastore_abort_txn_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_commit_txn_args__isset {
+  _ThriftHiveMetastore_commit_txn_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_commit_txn_args__isset;
+
+class ThriftHiveMetastore_commit_txn_args {
+ public:
+
+  ThriftHiveMetastore_commit_txn_args(const ThriftHiveMetastore_commit_txn_args&);
+  ThriftHiveMetastore_commit_txn_args& operator=(const ThriftHiveMetastore_commit_txn_args&);
+  ThriftHiveMetastore_commit_txn_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_commit_txn_args() throw();
+  CommitTxnRequest rqst;
+
+  _ThriftHiveMetastore_commit_txn_args__isset __isset;
+
+  void __set_rqst(const CommitTxnRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_commit_txn_args & rhs) const
+  {
+    if (!(rqst == rhs.rqst))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_commit_txn_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_commit_txn_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_commit_txn_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_commit_txn_pargs() throw();
+  const CommitTxnRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_commit_txn_result__isset {
+  _ThriftHiveMetastore_commit_txn_result__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_commit_txn_result__isset;
+
+class ThriftHiveMetastore_commit_txn_result {
+ public:
+
+  ThriftHiveMetastore_commit_txn_result(const ThriftHiveMetastore_commit_txn_result&);
+  ThriftHiveMetastore_commit_txn_result& operator=(const ThriftHiveMetastore_commit_txn_result&);
+  ThriftHiveMetastore_commit_txn_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_commit_txn_result() throw();
+  NoSuchTxnException o1;
+  TxnAbortedException o2;
+
+  _ThriftHiveMetastore_commit_txn_result__isset __isset;
+
+  void __set_o1(const NoSuchTxnException& val);
+
+  void __set_o2(const TxnAbortedException& val);
+
+  bool operator == (const ThriftHiveMetastore_commit_txn_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_commit_txn_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_commit_txn_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_commit_txn_presult__isset {
+  _ThriftHiveMetastore_commit_txn_presult__isset() : o1(false), o2(false) {}
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_commit_txn_presult__isset;
+
+class ThriftHiveMetastore_commit_txn_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_commit_txn_presult() throw();
+  NoSuchTxnException o1;
+  TxnAbortedException o2;
+
+  _ThriftHiveMetastore_commit_txn_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_lock_args__isset {
+  _ThriftHiveMetastore_lock_args__isset() : rqst(false) {}
+  bool rqst :1;
 } _ThriftHiveMetastore_lock_args__isset;
 
 class ThriftHiveMetastore_lock_args {
@@ -17859,6 +18733,30 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void cancel_delegation_token(const std::string& token_str_form);
   void send_cancel_delegation_token(const std::string& token_str_form);
   void recv_cancel_delegation_token();
+  bool add_token(const std::string& token_identifier, const std::string& delegation_token);
+  void send_add_token(const std::string& token_identifier, const std::string& delegation_token);
+  bool recv_add_token();
+  bool remove_token(const std::string& token_identifier);
+  void send_remove_token(const std::string& token_identifier);
+  bool recv_remove_token();
+  void get_token(std::string& _return, const std::string& token_identifier);
+  void send_get_token(const std::string& token_identifier);
+  void recv_get_token(std::string& _return);
+  void get_all_token_identifiers(std::vector<std::string> & _return);
+  void send_get_all_token_identifiers();
+  void recv_get_all_token_identifiers(std::vector<std::string> & _return);
+  int32_t add_master_key(const std::string& key);
+  void send_add_master_key(const std::string& key);
+  int32_t recv_add_master_key();
+  void update_master_key(const int32_t seq_number, const std::string& key);
+  void send_update_master_key(const int32_t seq_number, const std::string& key);
+  void recv_update_master_key();
+  bool remove_master_key(const int32_t key_seq);
+  void send_remove_master_key(const int32_t key_seq);
+  bool recv_remove_master_key();
+  void get_master_keys(std::vector<std::string> & _return);
+  void send_get_master_keys();
+  void recv_get_master_keys(std::vector<std::string> & _return);
   void get_open_txns(GetOpenTxnsResponse& _return);
   void send_get_open_txns();
   void recv_get_open_txns(GetOpenTxnsResponse& _return);
@@ -18052,6 +18950,14 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
   void process_get_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_renew_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_cancel_delegation_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_add_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_remove_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_token(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_all_token_identifiers(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_add_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_update_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_remove_master_key(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
+  void process_get_master_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_get_open_txns_info(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
   void process_open_txns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext);
@@ -18191,6 +19097,14 @@ class ThriftHiveMetastoreProcessor : public  ::facebook::fb303::FacebookServiceP
     processMap_["get_delegation_token"] = &ThriftHiveMetastoreProcessor::process_get_delegation_token;
     processMap_["renew_delegation_token"] = &ThriftHiveMetastoreProcessor::process_renew_delegation_token;
     processMap_["cancel_delegation_token"] = &ThriftHiveMetastoreProcessor::process_cancel_delegation_token;
+    processMap_["add_token"] = &ThriftHiveMetastoreProcessor::process_add_token;
+    processMap_["remove_token"] = &ThriftHiveMetastoreProcessor::process_remove_token;
+    processMap_["get_token"] = &ThriftHiveMetastoreProcessor::process_get_token;
+    processMap_["get_all_token_identifiers"] = &ThriftHiveMetastoreProcessor::process_get_all_token_identifiers;
+    processMap_["add_master_key"] = &ThriftHiveMetastoreProcessor::process_add_master_key;
+    processMap_["update_master_key"] = &ThriftHiveMetastoreProcessor::process_update_master_key;
+    processMap_["remove_master_key"] = &ThriftHiveMetastoreProcessor::process_remove_master_key;
+    processMap_["get_master_keys"] = &ThriftHiveMetastoreProcessor::process_get_master_keys;
     processMap_["get_open_txns"] = &ThriftHiveMetastoreProcessor::process_get_open_txns;
     processMap_["get_open_txns_info"] = &ThriftHiveMetastoreProcessor::process_get_open_txns_info;
     processMap_["open_txns"] = &ThriftHiveMetastoreProcessor::process_open_txns;
@@ -19312,6 +20226,81 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     ifaces_[i]->cancel_delegation_token(token_str_form);
   }
 
+  bool add_token(const std::string& token_identifier, const std::string& delegation_token) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_token(token_identifier, delegation_token);
+    }
+    return ifaces_[i]->add_token(token_identifier, delegation_token);
+  }
+
+  bool remove_token(const std::string& token_identifier) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->remove_token(token_identifier);
+    }
+    return ifaces_[i]->remove_token(token_identifier);
+  }
+
+  void get_token(std::string& _return, const std::string& token_identifier) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_token(_return, token_identifier);
+    }
+    ifaces_[i]->get_token(_return, token_identifier);
+    return;
+  }
+
+  void get_all_token_identifiers(std::vector<std::string> & _return) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_all_token_identifiers(_return);
+    }
+    ifaces_[i]->get_all_token_identifiers(_return);
+    return;
+  }
+
+  int32_t add_master_key(const std::string& key) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->add_master_key(key);
+    }
+    return ifaces_[i]->add_master_key(key);
+  }
+
+  void update_master_key(const int32_t seq_number, const std::string& key) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->update_master_key(seq_number, key);
+    }
+    ifaces_[i]->update_master_key(seq_number, key);
+  }
+
+  bool remove_master_key(const int32_t key_seq) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->remove_master_key(key_seq);
+    }
+    return ifaces_[i]->remove_master_key(key_seq);
+  }
+
+  void get_master_keys(std::vector<std::string> & _return) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_master_keys(_return);
+    }
+    ifaces_[i]->get_master_keys(_return);
+    return;
+  }
+
   void get_open_txns(GetOpenTxnsResponse& _return) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -19894,6 +20883,30 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void cancel_delegation_token(const std::string& token_str_form);
   int32_t send_cancel_delegation_token(const std::string& token_str_form);
   void recv_cancel_delegation_token(const int32_t seqid);
+  bool add_token(const std::string& token_identifier, const std::string& delegation_token);
+  int32_t send_add_token(const std::string& token_identifier, const std::string& delegation_token);
+  bool recv_add_token(const int32_t seqid);
+  bool remove_token(const std::string& token_identifier);
+  int32_t send_remove_token(const std::string& token_identifier);
+  bool recv_remove_token(const int32_t seqid);
+  void get_token(std::string& _return, const std::string& token_identifier);
+  int32_t send_get_token(const std::string& token_identifier);
+  void recv_get_token(std::string& _return, const int32_t seqid);
+  void get_all_token_identifiers(std::vector<std::string> & _return);
+  int32_t send_get_all_token_identifiers();
+  void recv_get_all_token_identifiers(std::vector<std::string> & _return, const int32_t seqid);
+  int32_t add_master_key(const std::string& key);
+  int32_t send_add_master_key(const std::string& key);
+  int32_t recv_add_master_key(const int32_t seqid);
+  void update_master_key(const int32_t seq_number, const std::string& key);
+  int32_t send_update_master_key(const int32_t seq_number, const std::string& key);
+  void recv_update_master_key(const int32_t seqid);
+  bool remove_master_key(const int32_t key_seq);
+  int32_t send_remove_master_key(const int32_t key_seq);
+  bool recv_remove_master_key(const int32_t seqid);
+  void get_master_keys(std::vector<std::string> & _return);
+  int32_t send_get_master_keys();
+  void recv_get_master_keys(std::vector<std::string> & _return, const int32_t seqid);
   void get_open_txns(GetOpenTxnsResponse& _return);
   int32_t send_get_open_txns();
   void recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid);

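The header above only declares the new token and master-key RPCs; client code drives them through the usual Thrift C++ plumbing. Below is a minimal sketch (not part of this patch) of calling the new methods. It assumes a Thrift 0.9.x-era runtime with boost::shared_ptr, the generated Apache::Hadoop::Hive namespace, and a metastore on localhost:9083 — the host, port, and token strings are placeholder assumptions.

#include <iostream>
#include <string>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using namespace apache::thrift::protocol;
using namespace apache::thrift::transport;
using namespace Apache::Hadoop::Hive;  // generated metastore namespace (assumption)

int main() {
  // Standard Thrift client stack: socket -> buffered transport -> binary protocol.
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  // Store, fetch, enumerate, and delete one delegation token entry.
  bool added = client.add_token("token-id-1", "opaque-token-payload");
  std::cout << "add_token: " << added << std::endl;

  std::string token;
  client.get_token(token, "token-id-1");  // strings come back via the out-parameter

  std::vector<std::string> ids;
  client.get_all_token_identifiers(ids);
  std::cout << "identifiers: " << ids.size() << std::endl;

  client.remove_token("token-id-1");
  transport->close();
  return 0;
}
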
http://git-wip-us.apache.org/repos/asf/hive/blob/87131d0c/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 6e21a9a..3e7c6e7 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -577,6 +577,46 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("cancel_delegation_token\n");
   }
 
+  bool add_token(const std::string& token_identifier, const std::string& delegation_token) {
+    // Your implementation goes here
+    printf("add_token\n");
+  }
+
+  bool remove_token(const std::string& token_identifier) {
+    // Your implementation goes here
+    printf("remove_token\n");
+  }
+
+  void get_token(std::string& _return, const std::string& token_identifier) {
+    // Your implementation goes here
+    printf("get_token\n");
+  }
+
+  void get_all_token_identifiers(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_all_token_identifiers\n");
+  }
+
+  int32_t add_master_key(const std::string& key) {
+    // Your implementation goes here
+    printf("add_master_key\n");
+  }
+
+  void update_master_key(const int32_t seq_number, const std::string& key) {
+    // Your implementation goes here
+    printf("update_master_key\n");
+  }
+
+  bool remove_master_key(const int32_t key_seq) {
+    // Your implementation goes here
+    printf("remove_master_key\n");
+  }
+
+  void get_master_keys(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_master_keys\n");
+  }
+
   void get_open_txns(GetOpenTxnsResponse& _return) {
     // Your implementation goes here
     printf("get_open_txns\n");


[14/51] [abbrv] hive git commit: HIVE-13112 : Expose Lineage information in case of CTAS (Harish Butani via Ashutosh Chauhan)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
index 1f4c83a..93ab473 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
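
(The POSTHOOK: Lineage lines above are the new output from HIVE-13112: for
a CTAS, the post-execution hook now records, for every column of the
created table, which source columns it was derived from. Each line reads
"target_table.column MODE [sources]", where SIMPLE means the column is a
plain copy of a single source column and each source is spelled
(sourceTable)alias.FieldSchema(name:..., type:..., comment:...). Here every
column of small_alltypesorc1a is copied unchanged from the column of the
same name in alltypesorc.)
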
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -117,6 +165,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a
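
(When the created table draws a column from more than one source, as with
small_alltypesorc_a above, which is populated from all four
small_alltypesorc*a tables, the dependency type switches from SIMPLE to
EXPRESSION and every contributing source column is listed on the same
line.)
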

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
index 58f4fae..c8001e0 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join2.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cbigint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
index 0df607d..4f1a98d 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1a
+POSTHOOK: Lineage: small_alltypesorc1a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2a as select * from alltypesorc where cint is null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2a
+POSTHOOK: Lineage: small_alltypesorc2a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3a as select * from alltypesorc where cint is not null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3a
+POSTHOOK: Lineage: small_alltypesorc3a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4a as select * from alltypesorc where cint is null and cstring1 is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4a
+POSTHOOK: Lineage: small_alltypesorc4a.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4a.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1a
@@ -122,6 +170,18 @@ POSTHOOK: Input: default@small_alltypesorc3a
 POSTHOOK: Input: default@small_alltypesorc4a
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_a
+POSTHOOK: Lineage: small_alltypesorc_a.cbigint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cboolean2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cdouble EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cfloat EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.csmallint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.cstring2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp1 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctimestamp2 EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_a.ctinyint EXPRESSION [(small_alltypesorc1a)small_alltypesorc1a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2a)small_alltypesorc2a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3a)small_alltypesorc3a.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4a)small_alltypesorc4a.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_a COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_a

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
index a6c45b0..a32f585 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out
@@ -10,6 +10,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc1b
+POSTHOOK: Lineage: small_alltypesorc1b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc1b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc2b as select * from alltypesorc where cint is null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -20,6 +32,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc2b
+POSTHOOK: Lineage: small_alltypesorc2b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc2b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc3b as select * from alltypesorc where cint is not null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -30,6 +54,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc3b
+POSTHOOK: Lineage: small_alltypesorc3b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc3b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: create table small_alltypesorc4b as select * from alltypesorc where cint is null and ctinyint is null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@alltypesorc
@@ -40,6 +76,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc4b
+POSTHOOK: Lineage: small_alltypesorc4b.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc4b.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: select * from small_alltypesorc1b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc1b
@@ -132,6 +180,18 @@ POSTHOOK: Input: default@small_alltypesorc3b
 POSTHOOK: Input: default@small_alltypesorc4b
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_alltypesorc_b
+POSTHOOK: Lineage: small_alltypesorc_b.cbigint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cbigint, type:bigint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean1, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cboolean2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cboolean2, type:boolean, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cdouble EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cdouble, type:double, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cfloat EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cfloat, type:float, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cint, type:int, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.csmallint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:csmallint, type:smallint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring1, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.cstring2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:cstring2, type:string, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp1 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctimestamp2 EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: small_alltypesorc_b.ctinyint EXPRESSION [(small_alltypesorc1b)small_alltypesorc1b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc2b)small_alltypesorc2b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc3b)small_alltypesorc3b.FieldSchema(name:ctinyint, type:tinyint, comment:null), (small_alltypesorc4b)small_alltypesorc4b.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_alltypesorc_b COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_alltypesorc_b

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
index c7a85ee..1b09fda 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join5.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join5.q.out
@@ -18,6 +18,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sorted_mod_4
+POSTHOOK: Lineage: sorted_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: sorted_mod_4.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE sorted_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@sorted_mod_4
@@ -46,6 +48,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table
+POSTHOOK: Lineage: small_table.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table
@@ -665,6 +669,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@mod_8_mod_4
+POSTHOOK: Lineage: mod_8_mod_4.cmodint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: mod_8_mod_4.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE mod_8_mod_4 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@mod_8_mod_4
@@ -693,6 +699,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@small_table2
+POSTHOOK: Lineage: small_table2.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: small_table2.cmodtinyint EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: ANALYZE TABLE small_table2 COMPUTE STATISTICS
 PREHOOK: type: QUERY
 PREHOOK: Input: default@small_table2

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_outer_join6.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
index 02a3f87..8c09716 100644
--- a/ql/src/test/results/clientpositive/vector_outer_join6.q.out
+++ b/ql/src/test/results/clientpositive/vector_outer_join6.q.out
@@ -84,6 +84,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin1_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN1
+POSTHOOK: Lineage: tjoin1.c1 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.c2 SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:c2, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin1.rnum SIMPLE [(tjoin1_txt)tjoin1_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN2 stored as orc AS SELECT * FROM TJOIN2_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin2_txt
@@ -94,6 +97,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin2_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN2
+POSTHOOK: Lineage: tjoin2.c1 SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin2.c2 SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin2.rnum SIMPLE [(tjoin2_txt)tjoin2_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN3 stored as orc AS SELECT * FROM TJOIN3_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin3_txt
@@ -104,6 +110,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin3_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN3
+POSTHOOK: Lineage: tjoin3.c1 SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin3.c2 SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin3.rnum SIMPLE [(tjoin3_txt)tjoin3_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: create table TJOIN4 stored as orc AS SELECT * FROM TJOIN4_txt
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@tjoin4_txt
@@ -114,6 +123,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@tjoin4_txt
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@TJOIN4
+POSTHOOK: Lineage: tjoin4.c1 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: tjoin4.c2 SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:c2, type:char(2), comment:null), ]
+POSTHOOK: Lineage: tjoin4.rnum SIMPLE [(tjoin4_txt)tjoin4_txt.FieldSchema(name:rnum, type:int, comment:null), ]
 PREHOOK: query: explain
 select tj1rnum, tj2rnum, tjoin3.rnum as rnumt3 from
    (select tjoin1.rnum tj1rnum, tjoin2.rnum tj2rnum, tjoin2.c1 tj2c1 from tjoin1 left outer join tjoin2 on tjoin1.c1 = tjoin2.c1 ) tj left outer join tjoin3 on tj2c1 = tjoin3.c1

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
index 36d3b1a..09dd873 100644
--- a/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
+++ b/ql/src/test/results/clientpositive/vector_partitioned_date_time.q.out
@@ -52,6 +52,12 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@flights_tiny
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@flights_tiny_orc
+POSTHOOK: Lineage: flights_tiny_orc.arr_delay SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:arr_delay, type:float, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.dest_city_name SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:dest_city_name, type:string, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_date SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:fl_date, type:date, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_num SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:fl_num, type:int, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.fl_time EXPRESSION [(flights_tiny)flights_tiny.FieldSchema(name:fl_date, type:date, comment:null), ]
+POSTHOOK: Lineage: flights_tiny_orc.origin_city_name SIMPLE [(flights_tiny)flights_tiny.FieldSchema(name:origin_city_name, type:string, comment:null), ]
 PREHOOK: query: SELECT * FROM flights_tiny_orc
 PREHOOK: type: QUERY
 PREHOOK: Input: default@flights_tiny_orc

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
index fd1a58e..19a3d50 100644
--- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
+++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out
@@ -10,6 +10,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@decimal_test
+POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN
 SELECT cint, cdouble, cdecimal1, cdecimal2, min(cdecimal1) as min_decimal1 FROM decimal_test
 WHERE cdecimal1 is not null and cdecimal2 is not null

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
index 8806808..5ca6d70 100644
--- a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
+++ b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out
@@ -98,6 +98,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_vc1
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_vc1_orc
+POSTHOOK: Lineage: varchar_join1_vc1_orc.c1 SIMPLE [(varchar_join1_vc1)varchar_join1_vc1.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_vc1_orc.c2 SIMPLE [(varchar_join1_vc1)varchar_join1_vc1.FieldSchema(name:c2, type:varchar(10), comment:null), ]
 PREHOOK: query: create table varchar_join1_vc2_orc stored as orc as select * from varchar_join1_vc2
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@varchar_join1_vc2
@@ -108,6 +110,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_vc2
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_vc2_orc
+POSTHOOK: Lineage: varchar_join1_vc2_orc.c1 SIMPLE [(varchar_join1_vc2)varchar_join1_vc2.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_vc2_orc.c2 SIMPLE [(varchar_join1_vc2)varchar_join1_vc2.FieldSchema(name:c2, type:varchar(20), comment:null), ]
 PREHOOK: query: create table varchar_join1_str_orc stored as orc as select * from varchar_join1_str
 PREHOOK: type: CREATETABLE_AS_SELECT
 PREHOOK: Input: default@varchar_join1_str
@@ -118,6 +122,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@varchar_join1_str
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@varchar_join1_str_orc
+POSTHOOK: Lineage: varchar_join1_str_orc.c1 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c1, type:int, comment:null), ]
+POSTHOOK: Lineage: varchar_join1_str_orc.c2 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c2, type:string, comment:null), ]
 PREHOOK: query: -- Join varchar with same length varchar
 explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
index 93cdb24..6cae52c 100644
--- a/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_decimal_date.q.out
@@ -8,6 +8,10 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@date_decimal_test
+POSTHOOK: Lineage: date_decimal_test.cdate EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdecimal EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: date_decimal_test.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
 PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
index 4b8ff69..91c10f0 100644
--- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
+++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
@@ -2972,6 +2972,18 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesnull
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@alltypesnullorc
+POSTHOOK: Lineage: alltypesnullorc.cbigint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cbigint, type:bigint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean1, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cboolean2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cdouble SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cfloat SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cfloat, type:float, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.csmallint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:csmallint, type:smallint, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.cstring2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:cstring2, type:string, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp1 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctimestamp2 SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: alltypesnullorc.ctinyint SIMPLE [(alltypesnull)alltypesnull.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
 PREHOOK: query: explain
 select count(*) from alltypesnullorc
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/windowing_navfn.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out
index a79fccc..1729434 100644
--- a/ql/src/test/results/clientpositive/windowing_navfn.q.out
+++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out
@@ -642,6 +642,8 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@src
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@wtest
+POSTHOOK: Lineage: wtest.a SCRIPT []
+POSTHOOK: Lineage: wtest.b SCRIPT []
 PREHOOK: query: select a, b,
 first_value(b) over (partition by a order by b rows between 1 preceding and 1 following ) ,
 first_value(b, true) over (partition by a order by b rows between 1 preceding and 1 following ) ,

http://git-wip-us.apache.org/repos/asf/hive/blob/fdc9cafe/ql/src/test/results/clientpositive/windowing_streaming.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/windowing_streaming.q.out b/ql/src/test/results/clientpositive/windowing_streaming.q.out
index 27dd96f..a74ddb3 100644
--- a/ql/src/test/results/clientpositive/windowing_streaming.q.out
+++ b/ql/src/test/results/clientpositive/windowing_streaming.q.out
@@ -373,6 +373,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sB
+POSTHOOK: Lineage: sb.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: sb.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: sb.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 PREHOOK: query: select * from sB
 where ctinyint is null
 PREHOOK: type: QUERY
@@ -441,6 +444,9 @@ POSTHOOK: type: CREATETABLE_AS_SELECT
 POSTHOOK: Input: default@alltypesorc
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@sD
+POSTHOOK: Lineage: sd.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ]
+POSTHOOK: Lineage: sd.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: sd.r SCRIPT [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), (alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ]
 PREHOOK: query: select * from sD
 where ctinyint is null
 PREHOOK: type: QUERY


[09/51] [abbrv] hive git commit: HIVE-13204: Vectorization: Add ChainedCheckerFactory for LIKE (Gopal V, reviewed by Sergey Shelukhin)

Posted by jd...@apache.org.
HIVE-13204: Vectorization: Add ChainedCheckerFactory for LIKE (Gopal V, reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/fe14a908
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/fe14a908
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/fe14a908

Branch: refs/heads/llap
Commit: fe14a9088853abc91740a54f4fe100a8a0cfced6
Parents: fe30121
Author: Gopal V <go...@apache.org>
Authored: Tue Mar 8 16:41:27 2016 -0800
Committer: Gopal V <go...@apache.org>
Committed: Tue Mar 8 16:41:27 2016 -0800

----------------------------------------------------------------------
 ...AbstractFilterStringColLikeStringScalar.java | 168 +++++++++++++++----
 .../FilterStringColLikeStringScalar.java        |  18 ++
 .../TestVectorStringExpressions.java            | 156 ++++++++++++++++-
 3 files changed, 303 insertions(+), 39 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/fe14a908/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
index 272ff9c..b70beef 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/AbstractFilterStringColLikeStringScalar.java
@@ -24,10 +24,13 @@ import java.nio.CharBuffer;
 import java.nio.charset.Charset;
 import java.nio.charset.CharsetDecoder;
 import java.nio.charset.CodingErrorAction;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.StringTokenizer;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
 import org.apache.hadoop.hive.ql.exec.vector.VectorExpressionDescriptor;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
@@ -218,8 +221,8 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
   /**
    * Matches the whole string to its pattern.
    */
-  protected static class NoneChecker implements Checker {
-    byte [] byteSub;
+  protected static final class NoneChecker implements Checker {
+    final byte [] byteSub;
 
     NoneChecker(String pattern) {
       try {
@@ -246,8 +249,8 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
   /**
    * Matches the beginning of each string to a pattern.
    */
-  protected static class BeginChecker implements Checker {
-    byte[] byteSub;
+  protected static final class BeginChecker implements Checker {
+    final byte[] byteSub;
 
     BeginChecker(String pattern) {
       try {
@@ -258,23 +261,20 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
     }
 
     public boolean check(byte[] byteS, int start, int len) {
+      int lenSub = byteSub.length;
       if (len < byteSub.length) {
         return false;
       }
-      for (int i = start, j = 0; j < byteSub.length; i++, j++) {
-        if (byteS[i] != byteSub[j]) {
-          return false;
-        }
-      }
-      return true;
+      return StringExpr.equal(byteSub, 0, lenSub, byteS, start, lenSub);
     }
   }
 
   /**
    * Matches the ending of each string to its pattern.
    */
-  protected static class EndChecker implements Checker {
-    byte[] byteSub;
+  protected static final class EndChecker implements Checker {
+    final byte[] byteSub;
+
     EndChecker(String pattern) {
       try {
         byteSub = pattern.getBytes("UTF-8");
@@ -288,21 +288,16 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
       if (len < lenSub) {
         return false;
       }
-      for (int i = start + len - lenSub, j = 0; j < lenSub; i++, j++) {
-        if (byteS[i] != byteSub[j]) {
-          return false;
-        }
-      }
-      return true;
+      return StringExpr.equal(byteSub, 0, lenSub, byteS, start + len - lenSub, lenSub);
     }
   }
 
   /**
    * Matches the middle of each string to its pattern.
    */
-  protected static class MiddleChecker implements Checker {
-    byte[] byteSub;
-    int lenSub;
+  protected static final class MiddleChecker implements Checker {
+    final byte[] byteSub;
+    final int lenSub;
 
     MiddleChecker(String pattern) {
       try {
@@ -314,25 +309,134 @@ public abstract class AbstractFilterStringColLikeStringScalar extends VectorExpr
     }
 
     public boolean check(byte[] byteS, int start, int len) {
+      return index(byteS, start, len) != -1;
+    }
+
+    /*
+     * Returns absolute offset of the match
+     */
+    public int index(byte[] byteS, int start, int len) {
       if (len < lenSub) {
-        return false;
+        return -1;
       }
       int end = start + len - lenSub + 1;
-      boolean match = false;
       for (int i = start; i < end; i++) {
-        match = true;
-        for (int j = 0; j < lenSub; j++) {
-          if (byteS[i + j] != byteSub[j]) {
-            match = false;
-            break;
-          }
+        if (StringExpr.equal(byteSub, 0, lenSub, byteS, i, lenSub)) {
+          return i;
         }
-        if (match) {
-          return true;
+      }
+      return -1;
+    }
+  }
+
+  /**
+   * Matches a chained sequence of checkers.
+   *
+   * This covers 4 chain scenarios (no escaping or single-char wildcards):
+   *
+   * 1) anchored left "abc%def%"
+   * 2) anchored right "%abc%def"
+   * 3) unanchored "%abc%def%"
+   * 4) anchored on both sides "abc%def"
+   */
+  protected static final class ChainedChecker implements Checker {
+
+    final int minLen;
+    final BeginChecker begin;
+    final EndChecker end;
+    final MiddleChecker[] middle;
+    final int[] midLens;
+    final int beginLen;
+    final int endLen;
+
+    ChainedChecker(String pattern) {
+      final StringTokenizer tokens = new StringTokenizer(pattern, "%");
+      final boolean leftAnchor = pattern.startsWith("%") == false;
+      final boolean rightAnchor = pattern.endsWith("%") == false;
+      int len = 0;
+      // at least 2 checkers always
+      BeginChecker left = null;
+      EndChecker right = null;
+      int leftLen = 0; // not -1
+      int rightLen = 0; // not -1
+      final List<MiddleChecker> checkers = new ArrayList<MiddleChecker>(2);
+      final List<Integer> lengths = new ArrayList<Integer>(2);
+
+      for (int i = 0; tokens.hasMoreTokens(); i++) {
+        String chunk = tokens.nextToken();
+        if (chunk.length() == 0) {
+          // %% is folded in the .*?.*? regex usually into .*?
+          continue;
+        }
+        len += utf8Length(chunk);
+        if (leftAnchor && i == 0) {
+          // first item
+          left = new BeginChecker(chunk);
+          leftLen = utf8Length(chunk);
+        } else if (rightAnchor && tokens.hasMoreTokens() == false) {
+          // last item
+          right = new EndChecker(chunk);
+          rightLen = utf8Length(chunk);
+        } else {
+          // middle items in order
+          checkers.add(new MiddleChecker(chunk));
+          lengths.add(utf8Length(chunk));
+        }
+      }
+      midLens = ArrayUtils.toPrimitive(lengths.toArray(ArrayUtils.EMPTY_INTEGER_OBJECT_ARRAY));
+      middle = checkers.toArray(new MiddleChecker[0]);
+      minLen = len;
+      begin = left;
+      end = right;
+      beginLen = leftLen;
+      endLen = rightLen;
+    }
+
+    public boolean check(byte[] byteS, final int start, final int len) {
+      int pos = start;
+      int mark = len;
+      if (len < minLen) {
+        return false;
+      }
+      // prefix, extend start
+      if (begin != null && false == begin.check(byteS, pos, mark)) {
+        // no match
+        return false;
+      } else {
+        pos += beginLen;
+        mark -= beginLen;
+      }
+      // suffix, reduce len
+      if (end != null && false == end.check(byteS, pos, mark)) {
+        // no match
+        return false;
+      } else {
+        // no pos change - no need since we've shrunk the string with same pos
+        mark -= endLen;
+      }
+      // loop for middles
+      for (int i = 0; i < middle.length; i++) {
+        int index = middle[i].index(byteS, pos, mark);
+        if (index == -1) {
+          // no match
+          return false;
+        } else {
+          mark -= ((index-pos) + midLens[i]);
+          pos = index + midLens[i];
         }
       }
-      return match;
+      // if all is good
+      return true;
     }
+
+    private int utf8Length(String chunk) {
+      try {
+        return chunk.getBytes("UTF-8").length;
+      } catch (UnsupportedEncodingException ue) {
+        throw new RuntimeException(ue);
+      }
+    }
+
   }
 
   /**

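[Editorial note] For readers tracing the new ChainedChecker.check() above: the pattern is split on '%' into literal chunks; an anchored first chunk is matched like BeginChecker, an anchored last chunk like EndChecker, and the remaining chunks are located left to right like MiddleChecker.index(), shrinking the searchable window after each hit. Below is a minimal stand-alone sketch of the same control flow, written over String instead of UTF-8 byte ranges; the class and method names are ours, not Hive's, and it assumes the pattern contains only literal chunks and '%', which is all ChainedCheckerFactory admits (see the next file).

import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

/** Illustrative sketch only; not the Hive implementation. */
public class ChainedLikeSketch {

  static boolean chainedLike(String pattern, String value) {
    final boolean leftAnchor = !pattern.startsWith("%");
    final boolean rightAnchor = !pattern.endsWith("%");
    final List<String> chunks = new ArrayList<>();
    final StringTokenizer tokens = new StringTokenizer(pattern, "%");
    while (tokens.hasMoreTokens()) {
      String t = tokens.nextToken();
      if (!t.isEmpty()) {
        chunks.add(t);                 // "%%" folds away, as in the patch
      }
    }
    int pos = 0;                       // left edge of the unmatched window
    int limit = value.length();        // right edge of the unmatched window
    int first = 0, last = chunks.size();
    if (leftAnchor && !chunks.isEmpty()) {       // anchored prefix, like BeginChecker
      String head = chunks.get(first++);
      if (!value.startsWith(head)) {
        return false;
      }
      pos += head.length();
    }
    if (rightAnchor && first < last) {           // anchored suffix, like EndChecker
      String tail = chunks.get(--last);
      if (limit - pos < tail.length() || !value.startsWith(tail, limit - tail.length())) {
        return false;
      }
      limit -= tail.length();
    }
    for (int i = first; i < last; i++) {         // floating middles, like MiddleChecker.index()
      String mid = chunks.get(i);
      int at = value.indexOf(mid, pos);
      if (at < 0 || at + mid.length() > limit) {
        return false;
      }
      pos = at + mid.length();                   // shrink the window past this match
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(chainedLike("abc%def", "abcXXdef"));   // true: anchored both sides
    System.out.println(chainedLike("%abc%def", "ZZabcXdef")); // true: anchored right
    System.out.println(chainedLike("abc%def%", "abdef"));     // false: prefix "abc" missing
  }
}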
http://git-wip-us.apache.org/repos/asf/hive/blob/fe14a908/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColLikeStringScalar.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColLikeStringScalar.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColLikeStringScalar.java
index c03c34e..0b279c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColLikeStringScalar.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/FilterStringColLikeStringScalar.java
@@ -38,6 +38,7 @@ public class FilterStringColLikeStringScalar extends AbstractFilterStringColLike
       new EndCheckerFactory(),
       new MiddleCheckerFactory(),
       new NoneCheckerFactory(),
+      new ChainedCheckerFactory(),
       new ComplexCheckerFactory());
 
   public FilterStringColLikeStringScalar() {
@@ -119,6 +120,23 @@ public class FilterStringColLikeStringScalar extends AbstractFilterStringColLike
   }
 
   /**
+   * Accepts chained LIKE patterns without escaping like "abc%def%ghi%" and creates corresponding
+   * checkers.
+   *
+   */
+  private static class ChainedCheckerFactory implements CheckerFactory {
+    private static final Pattern CHAIN_PATTERN = Pattern.compile("(%?[^%_\\\\]+%?)+");
+
+    public Checker tryCreate(String pattern) {
+      Matcher matcher = CHAIN_PATTERN.matcher(pattern);
+      if (matcher.matches()) {
+        return new ChainedChecker(pattern);
+      }
+      return null;
+    }
+  }
+
+  /**
    * Accepts any LIKE patterns and creates corresponding checkers.
    */
   private static class ComplexCheckerFactory implements CheckerFactory {

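[Editorial note] The CHAIN_PATTERN regex above is the gate: it admits patterns built purely from literal runs and '%' (no '_' wildcards and no backslash escapes), so everything that reaches ChainedChecker satisfies the "literal chunks and '%' only" assumption of the sketch shown earlier. A throwaway probe of the same regex, not part of the commit, whose results match the new assertions in the test file below:

import java.util.regex.Pattern;

public class ChainPatternProbe {
  public static void main(String[] args) {
    // Same literal as ChainedCheckerFactory.CHAIN_PATTERN in the patch.
    Pattern p = Pattern.compile("(%?[^%_\\\\]+%?)+");
    System.out.println(p.matcher("%abc%de").matches());      // true  -> ChainedChecker
    System.out.println(p.matcher("abc%def%ghi%").matches()); // true  -> ChainedChecker
    System.out.println(p.matcher("%abc_%de").matches());     // false -> falls through to ComplexChecker
  }
}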
http://git-wip-us.apache.org/repos/asf/hive/blob/fe14a908/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
index a51837e..5c323ba 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringExpressions.java
@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.hive.ql.exec.vector.expressions;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
+import java.util.Random;
+import java.util.StringTokenizer;
 
 import junit.framework.Assert;
 
@@ -55,15 +60,23 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.StringGroupColEqual
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.StringGroupColLessStringGroupColumn;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.StringScalarEqualStringGroupColumn;
 import org.apache.hadoop.hive.ql.exec.vector.expressions.gen.VarCharScalarEqualStringGroupColumn;
+import org.apache.hadoop.hive.ql.exec.vector.util.VectorizedRowGroupGenUtil;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.udf.UDFLike;
+import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test vectorized expression and filter evaluation for strings.
  */
 public class TestVectorStringExpressions {
 
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestVectorStringExpressions.class);
+
   private static byte[] red;
   private static byte[] redred;
   private static byte[] red2; // second copy of red, different object
@@ -99,7 +112,7 @@ public class TestVectorStringExpressions {
       mixedUpLower = "mixedup".getBytes("UTF-8");
       mixedUpUpper = "MIXEDUP".getBytes("UTF-8");
       mixPercentPattern = "mix%".getBytes("UTF-8"); // for use as wildcard pattern to test LIKE
-      multiByte = new byte[100];
+      multiByte = new byte[10];
       addMultiByteChars(multiByte);
       blanksLeft = "  foo".getBytes("UTF-8");
       blanksRight = "foo  ".getBytes("UTF-8");
@@ -4237,50 +4250,179 @@ public class TestVectorStringExpressions {
     Assert.assertEquals(initialBatchSize, batch.size);
   }
 
+  @Test
   public void testStringLikePatternType() throws UnsupportedEncodingException, HiveException {
     FilterStringColLikeStringScalar expr;
+    VectorizedRowBatch vrb = VectorizedRowGroupGenUtil.getVectorizedRowBatch(1, 1, 1);
+    vrb.cols[0] = new BytesColumnVector(1);
+    BytesColumnVector bcv = (BytesColumnVector) vrb.cols[0];
+    vrb.size = 0;
 
     // BEGIN pattern
     expr = new FilterStringColLikeStringScalar(0, "abc%".getBytes());
+    expr.evaluate(vrb);
     Assert.assertEquals(FilterStringColLikeStringScalar.BeginChecker.class,
         expr.checker.getClass());
 
     // END pattern
     expr = new FilterStringColLikeStringScalar(0, "%abc".getBytes("UTF-8"));
+    expr.evaluate(vrb);
     Assert.assertEquals(FilterStringColLikeStringScalar.EndChecker.class,
         expr.checker.getClass());
 
     // MIDDLE pattern
     expr = new FilterStringColLikeStringScalar(0, "%abc%".getBytes());
+    expr.evaluate(vrb);
     Assert.assertEquals(FilterStringColLikeStringScalar.MiddleChecker.class,
         expr.checker.getClass());
 
-    // COMPLEX pattern
+    // CHAIN pattern
     expr = new FilterStringColLikeStringScalar(0, "%abc%de".getBytes());
+    expr.evaluate(vrb);
+    Assert.assertEquals(FilterStringColLikeStringScalar.ChainedChecker.class,
+        expr.checker.getClass());
+
+    // COMPLEX pattern
+    expr = new FilterStringColLikeStringScalar(0, "%abc_%de".getBytes());
+    expr.evaluate(vrb);
     Assert.assertEquals(FilterStringColLikeStringScalar.ComplexChecker.class,
         expr.checker.getClass());
 
     // NONE pattern
     expr = new FilterStringColLikeStringScalar(0, "abc".getBytes());
+    expr.evaluate(vrb);
     Assert.assertEquals(FilterStringColLikeStringScalar.NoneChecker.class,
         expr.checker.getClass());
   }
 
-  public void testStringLikeMultiByte() throws HiveException {
+  @Test
+  public void testStringLikeMultiByte() throws HiveException, UnsupportedEncodingException {
     FilterStringColLikeStringScalar expr;
     VectorizedRowBatch batch;
 
     // verify that a multi byte LIKE expression matches a matching string
     batch = makeStringBatchMixedCharSize();
-    expr = new FilterStringColLikeStringScalar(0, ("%" + multiByte + "%").getBytes());
+    expr = new FilterStringColLikeStringScalar(0, ('%' + new String(multiByte) + '%').getBytes(StandardCharsets.UTF_8));
     expr.evaluate(batch);
-    Assert.assertEquals(batch.size, 1);
+    Assert.assertEquals(1, batch.size);
 
     // verify that a multi byte LIKE expression doesn't match a non-matching string
     batch = makeStringBatchMixedCharSize();
-    expr = new FilterStringColLikeStringScalar(0, ("%" + multiByte + "x").getBytes());
+    expr = new FilterStringColLikeStringScalar(0, ('%' + new String(multiByte) + 'x').getBytes(StandardCharsets.UTF_8));
     expr.evaluate(batch);
-    Assert.assertEquals(batch.size, 0);
+    Assert.assertEquals(0, batch.size);
+  }
+
+  private String randomizePattern(Random control, String value) {
+    switch (control.nextInt(10)) {
+    default:
+    case 0: {
+      return value;
+    }
+    case 1: {
+      return control.nextInt(1000) + value;
+    }
+    case 2: {
+      return value + control.nextInt(1000);
+    }
+    case 3: {
+      return control.nextInt(1000) + value.substring(1);
+    }
+    case 4: {
+      return value.substring(1) + control.nextInt(1000);
+    }
+    case 5: {
+      return control.nextInt(1000) + value.substring(0, value.length() - 1);
+    }
+    case 6: {
+      return "";
+    }
+    case 7: {
+      return value.toLowerCase();
+    }
+    case 8: {
+      StringBuffer sb = new StringBuffer(8);
+      for (int i = 0; i < control.nextInt(12); i++) {
+        sb.append((char) ('a' + control.nextInt(26)));
+      }
+      return sb.toString();
+    }
+    case 9: {
+      StringBuffer sb = new StringBuffer(8);
+      for (int i = 0; i < control.nextInt(12); i++) {
+        sb.append((char) ('A' + control.nextInt(26)));
+      }
+      return sb.toString();
+    }
+    }
+  }
+
+  private String generateCandidate(Random control, String pattern) {
+    StringBuffer sb = new StringBuffer();
+    final StringTokenizer tokens = new StringTokenizer(pattern, "%");
+    final boolean leftAnchor = pattern.startsWith("%");
+    final boolean rightAnchor = pattern.endsWith("%");
+    for (int i = 0; tokens.hasMoreTokens(); i++) {
+      String chunk = tokens.nextToken();
+      if (leftAnchor && i == 0) {
+        // first item
+        sb.append(randomizePattern(control, chunk));
+      } else if (rightAnchor && tokens.hasMoreTokens() == false) {
+        // last item
+        sb.append(randomizePattern(control, chunk));
+      } else {
+        // middle item
+        sb.append(randomizePattern(control, chunk));
+      }
+    }
+    return sb.toString();
+  }
+
+  @Test
+  public void testStringLikeRandomized() throws HiveException, UnsupportedEncodingException {
+    final String [] patterns = new String[] {
+        "ABC%",
+        "%ABC",
+        "%ABC%",
+        "ABC%DEF",
+        "ABC%DEF%",
+        "%ABC%DEF",
+        "%ABC%DEF%",
+        "ABC%DEF%EFG",
+        "%ABC%DEF%EFG",
+        "%ABC%DEF%EFG%H",
+    };
+    long positive = 0;
+    long negative = 0;
+    Random control = new Random(1234);
+    UDFLike udf = new UDFLike();
+    for (String pattern : patterns) {
+      VectorExpression expr = new FilterStringColLikeStringScalar(0, pattern.getBytes("utf-8"));
+      VectorizedRowBatch batch = VectorizedRowGroupGenUtil.getVectorizedRowBatch(1, 1, 1);
+      batch.cols[0] = new BytesColumnVector(1);
+      BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
+
+      Text pText = new Text(pattern);
+      for (int i=0; i < 1024; i++) {
+        String input = generateCandidate(control,pattern);
+        BooleanWritable like = udf.evaluate(new Text(input), pText);
+        batch.reset();
+        bcv.initBuffer();
+        byte[] utf8 = input.getBytes("utf-8");
+        bcv.setVal(0, utf8, 0, utf8.length);
+        bcv.noNulls = true;
+        batch.size = 1;
+        expr.evaluate(batch);
+        if (like.get()) {
+          positive++;
+        } else {
+          negative++;
+        }
+        assertEquals(String.format("Checking '%s' against '%s'", input, pattern), like.get(), (batch.size != 0));
+      }
+    }
+    LOG.info(String.format("Randomized testing: ran %d positive tests and %d negative tests",
+        positive, negative));
   }
 
   @Test


[26/51] [abbrv] hive git commit: HIVE-12558: LLAP: output QueryFragmentCounters somewhere (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by jd...@apache.org.
HIVE-12558: LLAP: output QueryFragmentCounters somewhere (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b6023c79
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b6023c79
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b6023c79

Branch: refs/heads/llap
Commit: b6023c796f0daa37aef2a59b57aa1a29548c8211
Parents: 456a91e
Author: Prasanth Jayachandran <j....@gmail.com>
Authored: Thu Mar 10 21:31:39 2016 -0600
Committer: Prasanth Jayachandran <j....@gmail.com>
Committed: Thu Mar 10 21:31:39 2016 -0600

----------------------------------------------------------------------
 .../TestOperationLoggingAPIWithTez.java         |   2 +-
 .../hive/llap/counters/LlapIOCounters.java      |  37 +++++
 .../hive/llap/counters/FragmentCountersMap.java |  46 +++++++
 .../llap/counters/QueryFragmentCounters.java    |  65 ++++-----
 .../hive/llap/daemon/impl/LlapTaskReporter.java |  14 +-
 .../llap/daemon/impl/TaskRunnerCallable.java    |  14 +-
 .../hive/llap/io/api/impl/LlapInputFormat.java  |  30 +++-
 .../llap/io/decode/OrcEncodedDataConsumer.java  |   9 +-
 .../llap/io/encoded/OrcEncodedDataReader.java   |  31 ++---
 .../hadoop/hive/ql/exec/tez/TezJobMonitor.java  | 136 +++++++++++++++----
 10 files changed, 291 insertions(+), 93 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
index bee1447..8b5b516 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithTez.java
@@ -54,7 +54,7 @@ public class TestOperationLoggingAPIWithTez extends OperationLoggingAPITestBase
       "org.apache.tez.common.counters.DAGCounter",
       "NUM_SUCCEEDED_TASKS",
       "TOTAL_LAUNCHED_TASKS",
-      "CPU_TIME_MILLIS"
+      "CPU_MILLISECONDS"
     };
     hiveConf = new HiveConf();
     hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose");

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-common/src/java/org/apache/hadoop/hive/llap/counters/LlapIOCounters.java
----------------------------------------------------------------------
diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/counters/LlapIOCounters.java b/llap-common/src/java/org/apache/hadoop/hive/llap/counters/LlapIOCounters.java
new file mode 100644
index 0000000..365ddab
--- /dev/null
+++ b/llap-common/src/java/org/apache/hadoop/hive/llap/counters/LlapIOCounters.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.counters;
+
+/**
+ * LLAP IO related counters.
+ */
+public enum LlapIOCounters {
+  NUM_VECTOR_BATCHES,
+  NUM_DECODED_BATCHES,
+  SELECTED_ROWGROUPS,
+  NUM_ERRORS,
+  ROWS_EMITTED,
+  METADATA_CACHE_HIT,
+  METADATA_CACHE_MISS,
+  CACHE_HIT_BYTES,
+  CACHE_MISS_BYTES,
+  ALLOCATED_BYTES,
+  ALLOCATED_USED_BYTES,
+  TOTAL_IO_TIME_NS,
+  DECODE_TIME_NS,
+  HDFS_TIME_NS,
+  CONSUMER_TIME_NS
+}
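
[Editorial note] Since other parts of this patch feed these constants straight into TezCounters.findCounter(Enum), each one surfaces as a named counter grouped under the enum's class name. A minimal sketch of that mapping; it assumes tez and this new llap-common class are on the classpath, and the class name below is ours:

import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
import org.apache.tez.common.counters.TezCounters;

public class LlapIOCountersSketch {
  public static void main(String[] args) {
    TezCounters counters = new TezCounters();
    // findCounter(Enum) files the counter in the enum class's group,
    // using the constant name as the counter name.
    counters.findCounter(LlapIOCounters.CACHE_HIT_BYTES).increment(4096);
    counters.findCounter(LlapIOCounters.ROWS_EMITTED).increment(1000);
    System.out.println(
        counters.findCounter(LlapIOCounters.CACHE_HIT_BYTES).getValue()); // 4096
  }
}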

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/counters/FragmentCountersMap.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/counters/FragmentCountersMap.java b/llap-server/src/java/org/apache/hadoop/hive/llap/counters/FragmentCountersMap.java
new file mode 100644
index 0000000..383b65f
--- /dev/null
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/counters/FragmentCountersMap.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.llap.counters;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.tez.common.counters.TezCounters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Maintains references to tez counters
+ */
+public class FragmentCountersMap {
+  private static final Logger LOG = LoggerFactory.getLogger(FragmentCountersMap.class);
+  private static final ConcurrentMap<String, TezCounters> perFragmentCounters = new ConcurrentHashMap<>();
+
+  public static void registerCountersForFragment(String identifier, TezCounters tezCounters) {
+    if (perFragmentCounters.putIfAbsent(identifier, tezCounters) != null) {
+      LOG.warn("Not registering duplicate counters for fragment with tez identifier string=" +
+          identifier);
+    }
+  }
+
+  public static TezCounters getCountersForFragment(String identifier) {
+    return perFragmentCounters.get(identifier);
+  }
+
+  public static void unregisterCountersForFragment(String identifier) {
+    perFragmentCounters.remove(identifier);
+  }
+}
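
[Editorial note] FragmentCountersMap is the process-global rendezvous point: the reporter thread publishes a fragment's TezCounters under an identifier string, and IO threads that know only the identifier can look up and update the same object. A usage sketch under that assumption; the identifier value below is made up:

import org.apache.hadoop.hive.llap.counters.FragmentCountersMap;
import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
import org.apache.tez.common.counters.TezCounters;

public class FragmentCountersMapSketch {
  public static void main(String[] args) {
    String fragmentId = "attempt_1234_0001_1_00_000000_0"; // hypothetical identifier
    TezCounters counters = new TezCounters();

    // Reporter side: publish the counters when the task registers.
    FragmentCountersMap.registerCountersForFragment(fragmentId, counters);

    // IO side: find them by the agreed identifier and update.
    TezCounters found = FragmentCountersMap.getCountersForFragment(fragmentId);
    if (found != null) {
      found.findCounter(LlapIOCounters.NUM_VECTOR_BATCHES).increment(1);
    }

    // Teardown: drop the reference so counters do not leak across fragments.
    FragmentCountersMap.unregisterCountersForFragment(fragmentId);
  }
}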

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/counters/QueryFragmentCounters.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/counters/QueryFragmentCounters.java b/llap-server/src/java/org/apache/hadoop/hive/llap/counters/QueryFragmentCounters.java
index 5d16f72..a53ac61 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/counters/QueryFragmentCounters.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/counters/QueryFragmentCounters.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.cache.LowLevelCacheCounters;
+import org.apache.tez.common.counters.TezCounters;
 
 /**
  * Per query counters.
@@ -30,24 +31,6 @@ import org.apache.hadoop.hive.llap.cache.LowLevelCacheCounters;
 public class QueryFragmentCounters implements LowLevelCacheCounters {
   private final boolean doUseTimeCounters;
 
-  public static enum Counter {
-    NUM_VECTOR_BATCHES,
-    NUM_DECODED_BATCHES,
-    SELECTED_ROWGROUPS,
-    NUM_ERRORS,
-    ROWS_EMITTED,
-    METADATA_CACHE_HIT,
-    METADATA_CACHE_MISS,
-    CACHE_HIT_BYTES,
-    CACHE_MISS_BYTES,
-    ALLOCATED_BYTES,
-    ALLOCATED_USED_BYTES,
-    TOTAL_IO_TIME_US,
-    DECODE_TIME_US,
-    HDFS_TIME_US,
-    CONSUMER_TIME_US
-  }
-
   public static enum Desc {
     MACHINE,
     TABLE,
@@ -57,25 +40,30 @@ public class QueryFragmentCounters implements LowLevelCacheCounters {
 
   private final AtomicLongArray fixedCounters;
   private final Object[] descs;
+  private final TezCounters tezCounters;
 
-  public QueryFragmentCounters(Configuration conf) {
-    fixedCounters = new AtomicLongArray(Counter.values().length);
+  public QueryFragmentCounters(Configuration conf, final TezCounters tezCounters) {
+    fixedCounters = new AtomicLongArray(LlapIOCounters.values().length);
     descs = new Object[Desc.values().length];
     doUseTimeCounters = HiveConf.getBoolVar(conf, ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS);
+    this.tezCounters = tezCounters;
     if (!doUseTimeCounters) {
-      setCounter(Counter.TOTAL_IO_TIME_US, -1);
-      setCounter(Counter.DECODE_TIME_US, -1);
-      setCounter(Counter.HDFS_TIME_US, -1);
-      setCounter(Counter.CONSUMER_TIME_US, -1);
+      setCounter(LlapIOCounters.TOTAL_IO_TIME_NS, -1);
+      setCounter(LlapIOCounters.DECODE_TIME_NS, -1);
+      setCounter(LlapIOCounters.HDFS_TIME_NS, -1);
+      setCounter(LlapIOCounters.CONSUMER_TIME_NS, -1);
     }
   }
 
-  public void incrCounter(Counter counter) {
+  public void incrCounter(LlapIOCounters counter) {
     incrCounter(counter, 1);
   }
 
-  public void incrCounter(Counter counter, long delta) {
+  public void incrCounter(LlapIOCounters counter, long delta) {
     fixedCounters.addAndGet(counter.ordinal(), delta);
+    if (tezCounters != null) {
+      tezCounters.findCounter(LlapIOCounters.values()[counter.ordinal()]).increment(delta);
+    }
   }
 
   @Override
@@ -83,13 +71,20 @@ public class QueryFragmentCounters implements LowLevelCacheCounters {
     return (doUseTimeCounters ? System.nanoTime() : 0);
   }
 
-  public void incrTimeCounter(Counter counter, long startTime) {
+  public void incrTimeCounter(LlapIOCounters counter, long startTime) {
     if (!doUseTimeCounters) return;
-    fixedCounters.addAndGet(counter.ordinal(), System.nanoTime() - startTime);
+    long delta = System.nanoTime() - startTime;
+    fixedCounters.addAndGet(counter.ordinal(), delta);
+    if (tezCounters != null) {
+      tezCounters.findCounter(LlapIOCounters.values()[counter.ordinal()]).increment(delta);
+    }
   }
 
-  public void setCounter(Counter counter, long value) {
+  public void setCounter(LlapIOCounters counter, long value) {
     fixedCounters.set(counter.ordinal(), value);
+    if (tezCounters != null) {
+      tezCounters.findCounter(LlapIOCounters.values()[counter.ordinal()]).setValue(value);
+    }
   }
 
   public void setDesc(Desc key, Object desc) {
@@ -98,23 +93,23 @@ public class QueryFragmentCounters implements LowLevelCacheCounters {
 
   @Override
   public void recordCacheHit(long bytesHit) {
-    incrCounter(Counter.CACHE_HIT_BYTES, bytesHit);
+    incrCounter(LlapIOCounters.CACHE_HIT_BYTES, bytesHit);
   }
 
   @Override
   public void recordCacheMiss(long bytesMissed) {
-    incrCounter(Counter.CACHE_MISS_BYTES, bytesMissed);
+    incrCounter(LlapIOCounters.CACHE_MISS_BYTES, bytesMissed);
   }
 
   @Override
   public void recordAllocBytes(long bytesUsed, long bytesAllocated) {
-    incrCounter(Counter.ALLOCATED_USED_BYTES, bytesUsed);
-    incrCounter(Counter.ALLOCATED_BYTES, bytesAllocated);
+    incrCounter(LlapIOCounters.ALLOCATED_USED_BYTES, bytesUsed);
+    incrCounter(LlapIOCounters.ALLOCATED_BYTES, bytesAllocated);
   }
 
   @Override
   public void recordHdfsTime(long startTime) {
-    incrTimeCounter(Counter.HDFS_TIME_US, startTime);
+    incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
   }
 
   @Override
@@ -135,7 +130,7 @@ public class QueryFragmentCounters implements LowLevelCacheCounters {
       if (i != 0) {
         sb.append(", ");
       }
-      sb.append(Counter.values()[i].name()).append("=").append(fixedCounters.get(i));
+      sb.append(LlapIOCounters.values()[i].name()).append("=").append(fixedCounters.get(i));
     }
     sb.append(" ]");
     return sb.toString();

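[Editorial note] Note the unit rename bundled into the hunks above: the time counters move from *_US to *_NS names, matching the System.nanoTime() arithmetic that produces the deltas. The timing idiom in isolation, as a tiny stand-alone illustration (the class name and the sleep stand-in are ours):

public class TimeCounterIdiom {
  public static void main(String[] args) throws InterruptedException {
    // counters.startTimeCounter() returns System.nanoTime() when time counters are enabled.
    long start = System.nanoTime();
    Thread.sleep(5); // stand-in for the timed work, e.g. an HDFS read
    long deltaNs = System.nanoTime() - start;
    // incrTimeCounter(HDFS_TIME_NS, start) adds this nanosecond delta to the fixed
    // AtomicLongArray slot and, after this patch, to the per-fragment TezCounters too.
    System.out.println("HDFS_TIME_NS += " + deltaNs);
  }
}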
http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
index bb9f341..08c6f27 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapTaskReporter.java
@@ -34,6 +34,7 @@ import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
+import org.apache.hadoop.hive.llap.counters.FragmentCountersMap;
 import org.apache.hadoop.hive.llap.protocol.LlapTaskUmbilicalProtocol;
 import org.apache.tez.common.counters.TezCounters;
 import org.apache.tez.dag.api.TezException;
@@ -43,11 +44,11 @@ import org.apache.tez.runtime.api.events.TaskAttemptCompletedEvent;
 import org.apache.tez.runtime.api.events.TaskAttemptFailedEvent;
 import org.apache.tez.runtime.api.events.TaskStatusUpdateEvent;
 import org.apache.tez.runtime.api.impl.EventMetaData;
+import org.apache.tez.runtime.api.impl.EventMetaData.EventProducerConsumerType;
 import org.apache.tez.runtime.api.impl.TaskStatistics;
 import org.apache.tez.runtime.api.impl.TezEvent;
 import org.apache.tez.runtime.api.impl.TezHeartbeatRequest;
 import org.apache.tez.runtime.api.impl.TezHeartbeatResponse;
-import org.apache.tez.runtime.api.impl.EventMetaData.EventProducerConsumerType;
 import org.apache.tez.runtime.internals.api.TaskReporterInterface;
 import org.apache.tez.runtime.task.ErrorReporter;
 import org.slf4j.Logger;
@@ -71,13 +72,13 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 public class LlapTaskReporter implements TaskReporterInterface {
 
   private static final Logger LOG = LoggerFactory.getLogger(LlapTaskReporter.class);
-
   private final LlapTaskUmbilicalProtocol umbilical;
   private final long pollInterval;
   private final long sendCounterInterval;
   private final int maxEventsToGet;
   private final AtomicLong requestCounter;
   private final String containerIdStr;
+  private final String fragmentFullId;
 
   private final ListeningExecutorService heartbeatExecutor;
 
@@ -85,13 +86,15 @@ public class LlapTaskReporter implements TaskReporterInterface {
   HeartbeatCallable currentCallable;
 
   public LlapTaskReporter(LlapTaskUmbilicalProtocol umbilical, long amPollInterval,
-                      long sendCounterInterval, int maxEventsToGet, AtomicLong requestCounter, String containerIdStr) {
+                      long sendCounterInterval, int maxEventsToGet, AtomicLong requestCounter,
+      String containerIdStr, final String fragFullId) {
     this.umbilical = umbilical;
     this.pollInterval = amPollInterval;
     this.sendCounterInterval = sendCounterInterval;
     this.maxEventsToGet = maxEventsToGet;
     this.requestCounter = requestCounter;
     this.containerIdStr = containerIdStr;
+    this.fragmentFullId = fragFullId;
     ExecutorService executor = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder()
         .setDaemon(true).setNameFormat("TaskHeartbeatThread").build());
     heartbeatExecutor = MoreExecutors.listeningDecorator(executor);
@@ -103,6 +106,9 @@ public class LlapTaskReporter implements TaskReporterInterface {
   @Override
   public synchronized void registerTask(RuntimeTask task,
                                         ErrorReporter errorReporter) {
+    TezCounters tezCounters = task.addAndGetTezCounter(fragmentFullId);
+    FragmentCountersMap.registerCountersForFragment(fragmentFullId, tezCounters);
+    LOG.info("Registered counters for fragment: {} vertexName: {}", fragmentFullId, task.getVertexName());
     currentCallable = new HeartbeatCallable(task, umbilical, pollInterval, sendCounterInterval,
         maxEventsToGet, requestCounter, containerIdStr);
     ListenableFuture<Boolean> future = heartbeatExecutor.submit(currentCallable);
@@ -115,6 +121,8 @@ public class LlapTaskReporter implements TaskReporterInterface {
    */
   @Override
   public synchronized void unregisterTask(TezTaskAttemptID taskAttemptID) {
+    LOG.info("Unregistered counters for fragment: {}", fragmentFullId);
+    FragmentCountersMap.unregisterCountersForFragment(fragmentFullId);
     currentCallable.markComplete();
     currentCallable = null;
   }
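
The FragmentCountersMap referenced above is not part of this diff; conceptually
it only needs to be a static concurrent map from fragment id to TezCounters,
along the lines of this hypothetical sketch:

    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.tez.common.counters.TezCounters;

    final class FragmentCountersMapSketch {
      private static final ConcurrentHashMap<String, TezCounters> PER_FRAGMENT =
          new ConcurrentHashMap<>();

      static void registerCountersForFragment(String id, TezCounters counters) {
        PER_FRAGMENT.put(id, counters);
      }

      static TezCounters getCountersForFragment(String id) {
        return PER_FRAGMENT.get(id); // null if the fragment never registered
      }

      static void unregisterCountersForFragment(String id) {
        PER_FRAGMENT.remove(id);
      }
    }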

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
index d88d82a..a1cfbb8 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/TaskRunnerCallable.java
@@ -51,6 +51,10 @@ import org.apache.tez.common.TezCommonUtils;
 import org.apache.tez.common.security.JobTokenIdentifier;
 import org.apache.tez.common.security.TokenCache;
 import org.apache.tez.dag.api.TezConstants;
+import org.apache.tez.dag.records.TezDAGID;
+import org.apache.tez.dag.records.TezTaskAttemptID;
+import org.apache.tez.dag.records.TezTaskID;
+import org.apache.tez.dag.records.TezVertexID;
 import org.apache.tez.hadoop.shim.HadoopShim;
 import org.apache.tez.runtime.api.ExecutionContext;
 import org.apache.tez.runtime.api.impl.TaskSpec;
@@ -64,6 +68,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
@@ -201,13 +206,20 @@ public class TaskRunnerCallable extends CallableWithNdc<TaskRunner2Result> {
       }
     });
 
+    TezTaskAttemptID taskAttemptID = taskSpec.getTaskAttemptID();
+    TezTaskID taskId = taskAttemptID.getTaskID();
+    TezVertexID tezVertexID = taskId.getVertexID();
+    TezDAGID tezDAGID = tezVertexID.getDAGId();
+    String fragFullId = Joiner.on('_').join(tezDAGID.getId(), tezVertexID.getId(), taskId.getId(),
+        taskAttemptID.getId());
     taskReporter = new LlapTaskReporter(
         umbilical,
         confParams.amHeartbeatIntervalMsMax,
         confParams.amCounterHeartbeatInterval,
         confParams.amMaxEventsPerHeartbeat,
         new AtomicLong(0),
-        request.getContainerIdString());
+        request.getContainerIdString(),
+        fragFullId);
 
     String attemptId = fragmentInfo.getFragmentIdentifierString();
     IOContextMap.setThreadAttemptId(attemptId);

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
index a3d71c0..85cca97 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapInputFormat.java
@@ -27,15 +27,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.llap.ConsumerFeedback;
 import org.apache.hadoop.hive.llap.DebugUtils;
+import org.apache.hadoop.hive.llap.counters.FragmentCountersMap;
+import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
 import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
-import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters.Counter;
 import org.apache.hadoop.hive.llap.io.decode.ColumnVectorProducer;
 import org.apache.hadoop.hive.llap.io.decode.ReadPipeline;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedInputFormatInterface;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
-import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.AvoidSplitCombination;
 import org.apache.hadoop.hive.ql.io.LlapAwareSplit;
 import org.apache.hadoop.hive.ql.io.SelfDescribingInputFormatInterface;
@@ -53,7 +53,12 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hive.common.util.HiveStringUtils;
+import org.apache.tez.common.counters.TezCounters;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -120,6 +125,7 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
 
   private class LlapRecordReader
       implements RecordReader<NullWritable, VectorizedRowBatch>, Consumer<ColumnVectorBatch> {
+    private final Logger LOG = LoggerFactory.getLogger(LlapRecordReader.class);
     private final FileSplit split;
     private final List<Integer> columnIds;
     private final SearchArgument sarg;
@@ -147,7 +153,21 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
       this.columnIds = includedCols;
       this.sarg = ConvertAstToSearchArg.createFromConf(job);
       this.columnNames = ColumnProjectionUtils.getReadColumnNames(job);
-      this.counters = new QueryFragmentCounters(job);
+      String dagId = job.get("tez.mapreduce.dag.index");
+      String vertexId = job.get("tez.mapreduce.vertex.index");
+      String taskId = job.get("tez.mapreduce.task.index");
+      String taskAttemptId = job.get("tez.mapreduce.task.attempt.index");
+      TezCounters taskCounters = null;
+      if (dagId != null && vertexId != null && taskId != null && taskAttemptId != null) {
+        String fullId = Joiner.on('_').join(dagId, vertexId, taskId, taskAttemptId);
+        taskCounters = FragmentCountersMap.getCountersForFragment(fullId);
+        LOG.info("Received dagid_vertexid_taskid_attempid: {}", fullId);
+      } else {
+        LOG.warn("Not using tez counters as some identifier is null." +
+            " dagId: {} vertexId: {} taskId: {} taskAttempId: {}",
+            dagId, vertexId, taskId, taskAttemptId);
+      }
+      this.counters = new QueryFragmentCounters(job, taskCounters);
       this.counters.setDesc(QueryFragmentCounters.Desc.MACHINE, hostName);
 
       MapWork mapWork = Utilities.getMapWork(job);
@@ -192,7 +212,7 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
         if (wasFirst) {
           firstReturnTime = counters.startTimeCounter();
         }
-        counters.incrTimeCounter(Counter.CONSUMER_TIME_US, firstReturnTime);
+        counters.incrTimeCounter(LlapIOCounters.CONSUMER_TIME_NS, firstReturnTime);
         return false;
       }
       if (columnIds.size() != cvb.cols.length) {
@@ -330,7 +350,7 @@ public class LlapInputFormat implements InputFormat<NullWritable, VectorizedRowB
 
     @Override
     public void setError(Throwable t) {
-      counters.incrCounter(QueryFragmentCounters.Counter.NUM_ERRORS);
+      counters.incrCounter(LlapIOCounters.NUM_ERRORS);
       LlapIoImpl.LOG.info("setError called; closed " + isClosed
         + ", done " + isDone + ", err " + pendingError + ", pending " + pendingData.size());
       assert t != null;
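
The lookup added in this file must reconstruct exactly the key that
LlapTaskReporter registered: the reporter joins the four numeric ids with '_',
the reader joins the corresponding conf strings, and both yield the same text.
A round-trip check with hypothetical values (dag 2, vertex 1, task 0,
attempt 0), using the Guava Joiner already imported by the patch:

    String producerKey = Joiner.on('_').join(2, 1, 0, 0);          // reporter side
    String consumerKey = Joiner.on('_').join("2", "1", "0", "0");  // conf side
    assert producerKey.equals(consumerKey); // both are "2_1_0_0"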

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
index 2597848..28cae87 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/decode/OrcEncodedDataConsumer.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch;
 import org.apache.hadoop.hive.common.io.encoded.EncodedColumnBatch.ColumnStreamData;
+import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
 import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
 import org.apache.hadoop.hive.llap.io.api.impl.ColumnVectorBatch;
 import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
@@ -118,11 +119,11 @@ public class OrcEncodedDataConsumer
 
         // we are done reading a batch, send it to consumer for processing
         downstreamConsumer.consumeData(cvb);
-        counters.incrCounter(QueryFragmentCounters.Counter.ROWS_EMITTED, batchSize);
+        counters.incrCounter(LlapIOCounters.ROWS_EMITTED, batchSize);
       }
-      counters.incrTimeCounter(QueryFragmentCounters.Counter.DECODE_TIME_US, startTime);
-      counters.incrCounter(QueryFragmentCounters.Counter.NUM_VECTOR_BATCHES, maxBatchesRG);
-      counters.incrCounter(QueryFragmentCounters.Counter.NUM_DECODED_BATCHES);
+      counters.incrTimeCounter(LlapIOCounters.DECODE_TIME_NS, startTime);
+      counters.incrCounter(LlapIOCounters.NUM_VECTOR_BATCHES, maxBatchesRG);
+      counters.incrCounter(LlapIOCounters.NUM_DECODED_BATCHES);
     } catch (IOException e) {
       // Caller will return the batch.
       downstreamConsumer.setError(e);

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
index b36cf64..bcee56b 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/encoded/OrcEncodedDataReader.java
@@ -25,6 +25,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hive.llap.cache.Cache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache;
 import org.apache.hadoop.hive.llap.cache.LowLevelCache.Priority;
 import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters;
-import org.apache.hadoop.hive.llap.counters.QueryFragmentCounters.Counter;
 import org.apache.hadoop.hive.llap.io.api.impl.LlapIoImpl;
 import org.apache.hadoop.hive.llap.io.decode.OrcEncodedDataConsumer;
 import org.apache.hadoop.hive.llap.io.metadata.OrcFileMetadata;
@@ -78,7 +78,6 @@ import org.apache.hadoop.hive.ql.io.orc.RecordReaderUtils;
 import org.apache.orc.StripeInformation;
 import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
 import org.apache.hadoop.mapred.FileSplit;
-import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hive.common.util.FixedSizedObjectPool;
 import org.apache.orc.OrcProto;
@@ -391,12 +390,12 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
           }
           isFoundInCache = (stripeMetadata != null);
           if (!isFoundInCache) {
-            counters.incrCounter(Counter.METADATA_CACHE_MISS);
+            counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
             ensureMetadataReader();
             long startTimeHdfs = counters.startTimeCounter();
             stripeMetadata = new OrcStripeMetadata(new OrcBatchKey(fileId, stripeIx, 0),
                 metadataReader, stripe, stripeIncludes, sargColumns);
-            counters.incrTimeCounter(Counter.HDFS_TIME_US, startTimeHdfs);
+            counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTimeHdfs);
             if (hasFileId && metadataCache != null) {
               stripeMetadata = metadataCache.putStripeMetadata(stripeMetadata);
               if (DebugUtils.isTraceOrcEnabled()) {
@@ -413,11 +412,11 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
                 + " metadata for includes: " + DebugUtils.toString(stripeIncludes));
           }
           assert isFoundInCache;
-          counters.incrCounter(Counter.METADATA_CACHE_MISS);
+          counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
           ensureMetadataReader();
           updateLoadedIndexes(stripeMetadata, stripe, stripeIncludes, sargColumns);
         } else if (isFoundInCache) {
-          counters.incrCounter(Counter.METADATA_CACHE_HIT);
+          counters.incrCounter(LlapIOCounters.METADATA_CACHE_HIT);
         }
       } catch (Throwable t) {
         consumer.setError(t);
@@ -462,7 +461,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
   }
 
   private void recordReaderTime(long startTime) {
-    counters.incrTimeCounter(Counter.TOTAL_IO_TIME_US, startTime);
+    counters.incrTimeCounter(LlapIOCounters.TOTAL_IO_TIME_NS, startTime);
   }
 
   private static String getDbAndTableName(Path path) {
@@ -571,7 +570,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
       if (stripeMetadata.hasAllIndexes(stripeIncludes)) return;
       long startTime = counters.startTimeCounter();
       stripeMetadata.loadMissingIndexes(metadataReader, stripe, stripeIncludes, sargColumns);
-      counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
+      counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
     }
   }
 
@@ -610,7 +609,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     long startTime = counters.startTimeCounter();
     ReaderOptions opts = OrcFile.readerOptions(conf).filesystem(fs).fileMetadata(fileMetadata);
     orcReader = EncodedOrcFile.createReader(path, opts);
-    counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
+    counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
   }
 
   /**
@@ -621,10 +620,10 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     if (fileId != null && metadataCache != null) {
       metadata = metadataCache.getFileMetadata(fileId);
       if (metadata != null) {
-        counters.incrCounter(Counter.METADATA_CACHE_HIT);
+        counters.incrCounter(LlapIOCounters.METADATA_CACHE_HIT);
         return metadata;
       } else {
-        counters.incrCounter(Counter.METADATA_CACHE_MISS);
+        counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
       }
     }
     ensureOrcReader();
@@ -651,14 +650,14 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
         value = metadataCache.getStripeMetadata(stripeKey);
       }
       if (value == null || !value.hasAllIndexes(globalInc)) {
-        counters.incrCounter(Counter.METADATA_CACHE_MISS);
+        counters.incrCounter(LlapIOCounters.METADATA_CACHE_MISS);
         ensureMetadataReader();
         StripeInformation si = fileMetadata.getStripes().get(stripeIx);
         if (value == null) {
           long startTime = counters.startTimeCounter();
           value = new OrcStripeMetadata(new OrcBatchKey(fileId, stripeIx, 0),
               metadataReader, si, globalInc, sargColumns);
-          counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
+          counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
           if (hasFileId && metadataCache != null) {
             value = metadataCache.putStripeMetadata(value);
             if (DebugUtils.isTraceOrcEnabled()) {
@@ -676,7 +675,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
           updateLoadedIndexes(value, si, globalInc, sargColumns);
         }
       } else {
-        counters.incrCounter(Counter.METADATA_CACHE_HIT);
+        counters.incrCounter(LlapIOCounters.METADATA_CACHE_HIT);
       }
       result.add(value);
       consumer.setStripeMetadata(value);
@@ -689,7 +688,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     if (metadataReader != null) return;
     long startTime = counters.startTimeCounter();
     metadataReader = orcReader.metadata();
-    counters.incrTimeCounter(Counter.HDFS_TIME_US, startTime);
+    counters.incrTimeCounter(LlapIOCounters.HDFS_TIME_NS, startTime);
   }
 
   @Override
@@ -772,7 +771,7 @@ public class OrcEncodedDataReader extends CallableWithNdc<Void>
     } else if (!isNone) {
       count = rgCount;
     }
-    counters.setCounter(QueryFragmentCounters.Counter.SELECTED_ROWGROUPS, count);
+    counters.setCounter(LlapIOCounters.SELECTED_ROWGROUPS, count);
   }
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/b6023c79/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
index c8d135e..418a03e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java
@@ -26,7 +26,6 @@ import java.io.InterruptedIOException;
 import java.io.PrintStream;
 import java.text.DecimalFormat;
 import java.text.NumberFormat;
-import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -38,6 +37,7 @@ import java.util.SortedSet;
 import java.util.TreeSet;
 
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.llap.counters.LlapIOCounters;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.MapOperator;
 import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
@@ -72,9 +72,13 @@ public class TezJobMonitor {
 
   private static final int COLUMN_1_WIDTH = 16;
   private static final int SEPARATOR_WIDTH = InPlaceUpdates.MIN_TERMINAL_WIDTH;
+  private static final String SEPARATOR = new String(new char[SEPARATOR_WIDTH]).replace("\0", "-");
+  private static final String PREP_SUMMARY_HEADER = "DAG Preparation Summary";
+  private static final String TASK_SUMMARY_HEADER = "Task Execution Summary";
+  private static final String LLAP_IO_SUMMARY_HEADER = "LLAP IO Summary";
 
   // keep this within 80 chars width. If more columns need to be added then update min terminal
-  // width requirement and separator width accordingly
+  // width requirement and SEPARATOR width accordingly
   private static final String HEADER_FORMAT = "%16s%10s %13s  %5s  %9s  %7s  %7s  %6s  %6s  ";
   private static final String VERTEX_FORMAT = "%-16s%10s %13s  %5s  %9s  %7s  %7s  %6s  %6s  ";
   private static final String FOOTER_FORMAT = "%-15s  %-30s %-4s  %-25s";
@@ -82,12 +86,15 @@ public class TezJobMonitor {
       "VERTICES", "MODE", "STATUS", "TOTAL", "COMPLETED", "RUNNING", "PENDING", "FAILED", "KILLED");
 
   // method and dag summary format
-  private static final String SUMMARY_HEADER_FORMAT = "%-16s %-12s %-12s %-12s %-19s %-19s %-15s %-15s %-15s";
-  private static final String SUMMARY_VERTEX_FORMAT = "%-16s %11s %16s %12s %16s %18s %18s %14s %16s";
+  private static final String SUMMARY_HEADER_FORMAT = "%10s %14s %13s %12s %14s %15s";
   private static final String SUMMARY_HEADER = String.format(SUMMARY_HEADER_FORMAT,
-      "VERTICES", "TOTAL_TASKS", "FAILED_ATTEMPTS", "KILLED_TASKS", "DURATION_SECONDS",
-      "CPU_TIME_MILLIS", "GC_TIME_MILLIS", "INPUT_RECORDS", "OUTPUT_RECORDS");
+      "VERTICES", "DURATION(ms)", "CPU_TIME(ms)", "GC_TIME(ms)", "INPUT_RECORDS", "OUTPUT_RECORDS");
 
+  // LLAP counters
+  private static final String LLAP_SUMMARY_HEADER_FORMAT = "%10s %9s %9s %10s %9s %10s %11s %8s %9s";
+  private static final String LLAP_SUMMARY_HEADER = String.format(LLAP_SUMMARY_HEADER_FORMAT,
+      "VERTICES", "ROWGROUPS", "META_HIT", "META_MISS", "DATA_HIT", "DATA_MISS",
+      "ALLOCATION", "USED", "TOTAL_IO");
   private static final String TOTAL_PREP_TIME = "TotalPrepTime";
   private static final String METHOD = "METHOD";
   private static final String DURATION = "DURATION(ms)";
@@ -95,7 +102,6 @@ public class TezJobMonitor {
   // in-place progress update related variables
   private int lines;
   private final PrintStream out;
-  private String separator;
 
   private transient LogHelper console;
   private final PerfLogger perfLogger = SessionState.getPerfLogger();
@@ -142,10 +148,6 @@ public class TezJobMonitor {
     // all progress updates are written to info stream and log file. In-place updates can only be
     // done to info stream (console)
     out = console.getInfoStream();
-    separator = "";
-    for (int i = 0; i < SEPARATOR_WIDTH; i++) {
-      separator += "-";
-    }
   }
 
   /**
@@ -219,7 +221,8 @@ public class TezJobMonitor {
     Set<StatusGetOpts> opts = new HashSet<StatusGetOpts>();
     long startTime = 0;
     boolean isProfileEnabled = HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) ||
-      Utilities.isPerfOrAboveLogging(conf);
+        Utilities.isPerfOrAboveLogging(conf);
+    boolean llapIoEnabled = HiveConf.getBoolVar(conf, HiveConf.ConfVars.LLAP_IO_ENABLED);
 
     boolean inPlaceEligible = InPlaceUpdates.inPlaceEligible(conf);
     synchronized(shutdownList) {
@@ -285,8 +288,23 @@ public class TezJobMonitor {
                   + String.format("%.2f seconds", duration));
               console.printInfo("\n");
 
+              console.printInfo(PREP_SUMMARY_HEADER);
               printMethodsSummary();
+              console.printInfo(SEPARATOR);
+              console.printInfo("");
+
+              console.printInfo(TASK_SUMMARY_HEADER);
               printDagSummary(progressMap, console, dagClient, conf, dag);
+              console.printInfo(SEPARATOR);
+              console.printInfo("");
+
+              if (llapIoEnabled) {
+                console.printInfo(LLAP_IO_SUMMARY_HEADER);
+                printLlapIOSummary(progressMap, console, dagClient);
+                console.printInfo(SEPARATOR);
+              }
+
+              console.printInfo("\n");
             }
             running = false;
             done = true;
@@ -408,7 +426,9 @@ public class TezJobMonitor {
 
     /* Build the method summary header */
     String methodBreakdownHeader = String.format("%-30s %-13s", METHOD, DURATION);
-    console.printInfo(methodBreakdownHeader);
+    console.printInfo(SEPARATOR);
+    reprintLineWithColorAsBold(methodBreakdownHeader, Ansi.Color.CYAN);
+    console.printInfo(SEPARATOR);
 
     for (String method : perfLoggerReportMethods) {
       long duration = perfLogger.getDuration(method);
@@ -423,7 +443,7 @@ public class TezJobMonitor {
     totalInPrepTime = perfLogger.getStartTime(PerfLogger.TEZ_RUN_DAG) -
         perfLogger.getStartTime(PerfLogger.TIME_TO_SUBMIT);
 
-    console.printInfo(String.format("%-30s %11s\n", TOTAL_PREP_TIME, commaFormat.format(
+    console.printInfo(String.format("%-30s %11s", TOTAL_PREP_TIME, commaFormat.format(
         totalInPrepTime)));
   }
 
@@ -448,18 +468,16 @@ public class TezJobMonitor {
     }
 
     /* Print the per Vertex summary */
-    console.printInfo(SUMMARY_HEADER);
+    console.printInfo(SEPARATOR);
+    reprintLineWithColorAsBold(SUMMARY_HEADER, Ansi.Color.CYAN);
+    console.printInfo(SEPARATOR);
     SortedSet<String> keys = new TreeSet<String>(progressMap.keySet());
     Set<StatusGetOpts> statusOptions = new HashSet<StatusGetOpts>(1);
     statusOptions.add(StatusGetOpts.GET_COUNTERS);
     for (String vertexName : keys) {
       Progress progress = progressMap.get(vertexName);
       if (progress != null) {
-        final int totalTasks = progress.getTotalTaskCount();
-        final int failedTaskAttempts = progress.getFailedTaskAttemptCount();
-        final int killedTaskAttempts = progress.getKilledTaskAttemptCount();
-        final double duration =
-            perfLogger.getDuration(PerfLogger.TEZ_RUN_VERTEX + vertexName) / 1000.0;
+        final double duration = perfLogger.getDuration(PerfLogger.TEZ_RUN_VERTEX + vertexName);
         VertexStatus vertexStatus = null;
         try {
           vertexStatus = dagClient.getVertexStatus(vertexName, statusOptions);
@@ -540,11 +558,8 @@ public class TezJobMonitor {
                     + vertexName.replace(" ", "_"))
                 + hiveOutputIntermediateRecords;
 
-        String vertexExecutionStats = String.format(SUMMARY_VERTEX_FORMAT,
+        String vertexExecutionStats = String.format(SUMMARY_HEADER_FORMAT,
             vertexName,
-            totalTasks,
-            failedTaskAttempts,
-            killedTaskAttempts,
             secondsFormat.format((duration)),
             commaFormat.format(cpuTimeMillis),
             commaFormat.format(gcTimeMillis),
@@ -555,6 +570,71 @@ public class TezJobMonitor {
     }
   }
 
+
+  private String humanReadableByteCount(long bytes) {
+    int unit = 1000; // use binary units instead?
+    if (bytes < unit) {
+      return bytes + "B";
+    }
+    int exp = (int) (Math.log(bytes) / Math.log(unit));
+    String suffix = "KMGTPE".charAt(exp-1) + "";
+    return String.format("%.2f%sB", bytes / Math.pow(unit, exp), suffix);
+  }
+
+  private void printLlapIOSummary(Map<String, Progress> progressMap, LogHelper console,
+      DAGClient dagClient) throws Exception {
+    SortedSet<String> keys = new TreeSet<>(progressMap.keySet());
+    Set<StatusGetOpts> statusOptions = new HashSet<>(1);
+    statusOptions.add(StatusGetOpts.GET_COUNTERS);
+    boolean first = true;
+    String counterGroup = LlapIOCounters.class.getName();
+    for (String vertexName : keys) {
+      // Reducers do not benefit from LLAP IO so no point in printing
+      if (vertexName.startsWith("Reducer")) {
+        continue;
+      }
+      TezCounters vertexCounters = dagClient.getVertexStatus(vertexName, statusOptions)
+          .getVertexCounters();
+      if (vertexCounters != null) {
+        final long selectedRowgroups = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.SELECTED_ROWGROUPS.name());
+        final long metadataCacheHit = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.METADATA_CACHE_HIT.name());
+        final long metadataCacheMiss = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.METADATA_CACHE_MISS.name());
+        final long cacheHitBytes = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.CACHE_HIT_BYTES.name());
+        final long cacheMissBytes = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.CACHE_MISS_BYTES.name());
+        final long allocatedBytes = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.ALLOCATED_BYTES.name());
+        final long allocatedUsedBytes = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.ALLOCATED_USED_BYTES.name());
+        final long totalIoTime = getCounterValueByGroupName(vertexCounters,
+            counterGroup, LlapIOCounters.TOTAL_IO_TIME_NS.name());
+
+        if (first) {
+          console.printInfo(SEPARATOR);
+          reprintLineWithColorAsBold(LLAP_SUMMARY_HEADER, Ansi.Color.CYAN);
+          console.printInfo(SEPARATOR);
+          first = false;
+        }
+
+        String queryFragmentStats = String.format(LLAP_SUMMARY_HEADER_FORMAT,
+            vertexName,
+            selectedRowgroups,
+            metadataCacheHit,
+            metadataCacheMiss,
+            humanReadableByteCount(cacheHitBytes),
+            humanReadableByteCount(cacheMissBytes),
+            humanReadableByteCount(allocatedBytes),
+            humanReadableByteCount(allocatedUsedBytes),
+            secondsFormat.format(totalIoTime / 1000_000_000.0) + "s");
+        console.printInfo(queryFragmentStats);
+      }
+    }
+  }
+
   private void printStatusInPlace(Map<String, Progress> progressMap, long startTime,
       boolean vextexStatusFromAM, DAGClient dagClient) {
     StringBuilder reportBuffer = new StringBuilder();
@@ -568,9 +648,9 @@ public class TezJobMonitor {
     // -------------------------------------------------------------------------------
     //         VERTICES     STATUS  TOTAL  COMPLETED  RUNNING  PENDING  FAILED  KILLED
     // -------------------------------------------------------------------------------
-    reprintLine(separator);
+    reprintLine(SEPARATOR);
     reprintLineWithColorAsBold(HEADER, Ansi.Color.CYAN);
-    reprintLine(separator);
+    reprintLine(SEPARATOR);
 
     SortedSet<String> keys = new TreeSet<String>(progressMap.keySet());
     int idx = 0;
@@ -663,11 +743,11 @@ public class TezJobMonitor {
     // -------------------------------------------------------------------------------
     // VERTICES: 03/04            [=================>>-----] 86%  ELAPSED TIME: 1.71 s
     // -------------------------------------------------------------------------------
-    reprintLine(separator);
+    reprintLine(SEPARATOR);
     final float progress = (sumTotal == 0) ? 0.0f : (float) sumComplete / (float) sumTotal;
     String footer = getFooter(keys.size(), completed.size(), progress, startTime);
     reprintLineWithColorAsBold(footer, Ansi.Color.RED);
-    reprintLine(separator);
+    reprintLine(SEPARATOR);
   }
 
   private String getMode(String name, Map<String, BaseWork> workMap) {
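
The humanReadableByteCount helper added above uses SI units (powers of 1000),
as its comment notes; a 1024-based binary variant, shown here only for
comparison and not part of the patch, differs just in the unit and suffix:

    static String humanReadableBinaryByteCount(long bytes) {
      final int unit = 1024;
      if (bytes < unit) {
        return bytes + "B";
      }
      int exp = (int) (Math.log(bytes) / Math.log(unit));
      char prefix = "KMGTPE".charAt(exp - 1);
      // e.g. 1536 -> "1.50KiB", 1048576 -> "1.00MiB"
      return String.format("%.2f%ciB", bytes / Math.pow(unit, exp), prefix);
    }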


[12/51] [abbrv] hive git commit: HIVE-13216 : ORC Reader will leave file open until GC when opening a malformed ORC file (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by jd...@apache.org.
HIVE-13216 : ORC Reader will leave file open until GC when opening a malformed ORC file (Sergey Shelukhin, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3931d4d6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3931d4d6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3931d4d6

Branch: refs/heads/llap
Commit: 3931d4d67fe2ca930f0ca6ed2d9bd6ff37ff9087
Parents: 61b6644
Author: Sergey Shelukhin <se...@apache.org>
Authored: Wed Mar 9 10:51:58 2016 -0800
Committer: Sergey Shelukhin <se...@apache.org>
Committed: Wed Mar 9 10:51:58 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/ReaderImpl.java       | 108 ++++++++++---------
 1 file changed, 57 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/3931d4d6/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
index 1299c9c..773c2b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java
@@ -485,61 +485,67 @@ public class ReaderImpl implements Reader {
                                                         long maxFileLength
                                                         ) throws IOException {
     FSDataInputStream file = fs.open(path);
+    ByteBuffer buffer = null, fullFooterBuffer = null;
+    OrcProto.PostScript ps = null;
+    OrcFile.WriterVersion writerVersion = null;
+    try {
+      // figure out the size of the file using the option or filesystem
+      long size;
+      if (maxFileLength == Long.MAX_VALUE) {
+        size = fs.getFileStatus(path).getLen();
+      } else {
+        size = maxFileLength;
+      }
 
-    // figure out the size of the file using the option or filesystem
-    long size;
-    if (maxFileLength == Long.MAX_VALUE) {
-      size = fs.getFileStatus(path).getLen();
-    } else {
-      size = maxFileLength;
-    }
-
-    //read last bytes into buffer to get PostScript
-    int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
-    ByteBuffer buffer = ByteBuffer.allocate(readSize);
-    assert buffer.position() == 0;
-    file.readFully((size - readSize),
-        buffer.array(), buffer.arrayOffset(), readSize);
-    buffer.position(0);
-
-    //read the PostScript
-    //get length of PostScript
-    int psLen = buffer.get(readSize - 1) & 0xff;
-    ensureOrcFooter(file, path, psLen, buffer);
-    int psOffset = readSize - 1 - psLen;
-    OrcProto.PostScript ps = extractPostScript(buffer, path, psLen, psOffset);
-
-    int footerSize = (int) ps.getFooterLength();
-    int metadataSize = (int) ps.getMetadataLength();
-    OrcFile.WriterVersion writerVersion = extractWriterVersion(ps);
-
-
-    //check if extra bytes need to be read
-    ByteBuffer fullFooterBuffer = null;
-    int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
-    if (extra > 0) {
-      //more bytes need to be read, seek back to the right place and read extra bytes
-      ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
-      file.readFully((size - readSize - extra), extraBuf.array(),
-          extraBuf.arrayOffset() + extraBuf.position(), extra);
-      extraBuf.position(extra);
-      //append with already read bytes
-      extraBuf.put(buffer);
-      buffer = extraBuf;
+      //read last bytes into buffer to get PostScript
+      int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
+      buffer = ByteBuffer.allocate(readSize);
+      assert buffer.position() == 0;
+      file.readFully((size - readSize),
+          buffer.array(), buffer.arrayOffset(), readSize);
       buffer.position(0);
-      fullFooterBuffer = buffer.slice();
-      buffer.limit(footerSize + metadataSize);
-    } else {
-      //footer is already in the bytes in buffer, just adjust position, length
-      buffer.position(psOffset - footerSize - metadataSize);
-      fullFooterBuffer = buffer.slice();
-      buffer.limit(psOffset);
-    }
 
-    // remember position for later
-    buffer.mark();
+      //read the PostScript
+      //get length of PostScript
+      int psLen = buffer.get(readSize - 1) & 0xff;
+      ensureOrcFooter(file, path, psLen, buffer);
+      int psOffset = readSize - 1 - psLen;
+      ps = extractPostScript(buffer, path, psLen, psOffset);
+
+      int footerSize = (int) ps.getFooterLength();
+      int metadataSize = (int) ps.getMetadataLength();
+      writerVersion = extractWriterVersion(ps);
+
+      //check if extra bytes need to be read
+      int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
+      if (extra > 0) {
+        //more bytes need to be read, seek back to the right place and read extra bytes
+        ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
+        file.readFully((size - readSize - extra), extraBuf.array(),
+            extraBuf.arrayOffset() + extraBuf.position(), extra);
+        extraBuf.position(extra);
+        //append with already read bytes
+        extraBuf.put(buffer);
+        buffer = extraBuf;
+        buffer.position(0);
+        fullFooterBuffer = buffer.slice();
+        buffer.limit(footerSize + metadataSize);
+      } else {
+        //footer is already in the bytes in buffer, just adjust position, length
+        buffer.position(psOffset - footerSize - metadataSize);
+        fullFooterBuffer = buffer.slice();
+        buffer.limit(psOffset);
+      }
 
-    file.close();
+      // remember position for later TODO: what later? this comment is useless
+      buffer.mark();
+    } finally {
+      try {
+        file.close();
+      } catch (IOException ex) {
+        LOG.error("Failed to close the file after another error", ex);
+      }
+    }
 
     return new FileMetaInfo(
         ps.getCompression().toString(),
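
The fix in this file follows the standard close-on-all-paths pattern: the
close() moves into a finally block, and a secondary failure from close() is
logged rather than thrown so it cannot mask the original exception. Reduced to
a sketch (readFooter is a hypothetical stand-in for the footer parsing):

    FSDataInputStream file = fs.open(path);
    try {
      readFooter(file); // may throw on a malformed file
    } finally {
      try {
        file.close(); // previously skipped on error, leaking until GC
      } catch (IOException ex) {
        LOG.error("Failed to close the file after another error", ex);
      }
    }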


[30/51] [abbrv] hive git commit: HIVE-13218 : LLAP: better configs part 1 (Sergey Shelukhin, reviewed by Gopal V)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
index db21036..3f159a3 100644
--- a/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/vectorized_dynamic_partition_pruning.q.out
@@ -74,7 +74,7 @@ STAGE PLANS:
                         Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -286,7 +286,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -398,7 +398,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -593,7 +593,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -748,7 +748,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -906,7 +906,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1018,7 +1018,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1158,7 +1158,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1270,7 +1270,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1408,7 +1408,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1535,7 +1535,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1647,7 +1647,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1759,7 +1759,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -1899,7 +1899,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2025,7 +2025,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2184,7 +2184,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2328,7 +2328,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2457,7 +2457,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2564,7 +2564,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2673,7 +2673,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2821,7 +2821,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -2975,7 +2975,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3106,7 +3106,7 @@ STAGE PLANS:
                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3121,7 +3121,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3157,7 +3157,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 8 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3313,7 +3313,7 @@ STAGE PLANS:
                     Map-reduce partition columns: _col0 (type: string)
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3328,7 +3328,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 5 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3364,7 +3364,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 8 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3519,7 +3519,7 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
         Reducer 11 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -3570,7 +3570,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 5
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3601,7 +3601,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 6 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -3614,7 +3614,7 @@ STAGE PLANS:
                   Map-reduce partition columns: _col0 (type: string)
                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reducer 8 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -3779,7 +3779,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -3962,7 +3962,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4115,7 +4115,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4250,7 +4250,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4361,7 +4361,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4483,7 +4483,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4604,7 +4604,7 @@ STAGE PLANS:
                       Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4769,7 +4769,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4856,7 +4856,7 @@ STAGE PLANS:
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
             Execution mode: llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -4945,7 +4945,7 @@ STAGE PLANS:
                             value expressions: _col0 (type: bigint)
             Execution mode: vectorized, llap
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5081,7 +5081,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5224,7 +5224,7 @@ STAGE PLANS:
                         Statistics: Num rows: 1 Data size: 172 Basic stats: COMPLETE Column stats: NONE
             Execution mode: vectorized, llap
         Reducer 3 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
@@ -5345,7 +5345,7 @@ STAGE PLANS:
                         value expressions: _col0 (type: string)
             Execution mode: llap
         Reducer 2 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
@@ -5360,7 +5360,7 @@ STAGE PLANS:
                       output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                       serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
         Reducer 4 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: max(VALUE._col0)
@@ -5396,7 +5396,7 @@ STAGE PLANS:
                           Statistics: Num rows: 2 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                           Target Vertex: Map 1
         Reducer 7 
-            Execution mode: vectorized, uber
+            Execution mode: vectorized, llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: min(VALUE._col0)
@@ -5591,7 +5591,7 @@ STAGE PLANS:
                             Target Vertex: Map 1
             Execution mode: llap
         Reducer 2 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)

http://git-wip-us.apache.org/repos/asf/hive/blob/428a930c/ql/src/test/results/clientpositive/tez/llapdecider.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/llapdecider.q.out b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
index 2b0e639..db3bf22 100644
--- a/ql/src/test/results/clientpositive/tez/llapdecider.q.out
+++ b/ql/src/test/results/clientpositive/tez/llapdecider.q.out
@@ -1008,7 +1008,7 @@ STAGE PLANS:
                   Statistics: Num rows: 1219 Data size: 433964 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
         Reducer 3 
-            Execution mode: uber
+            Execution mode: llap
             Reduce Operator Tree:
               Select Operator
                 expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), KEY.reducesinkkey0 (type: string)


[35/51] [abbrv] hive git commit: HIVE-13251: Hive can't read the decimal in an AVRO file generated by a previous version (Reviewed by Szehon Ho)

Posted by jd...@apache.org.
HIVE-13251: Hive can't read the decimal in an AVRO file generated by a previous version (Reviewed by Szehon Ho)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d4c1fdcf
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d4c1fdcf
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d4c1fdcf

Branch: refs/heads/llap
Commit: d4c1fdcf26ad8a12d77841def6afead86377b1ac
Parents: de260b4
Author: Aihua Xu <ai...@apache.org>
Authored: Thu Mar 10 10:29:40 2016 -0500
Committer: Aihua Xu <ai...@apache.org>
Committed: Mon Mar 14 11:07:24 2016 -0400

----------------------------------------------------------------------
 data/files/dec_old.avro                         | Bin 0 -> 331 bytes
 .../test/queries/clientnegative/avro_decimal.q  |  17 ++++++
 .../queries/clientpositive/avro_decimal_old.q   |  14 +++++
 .../results/clientnegative/avro_decimal.q.out   |  22 +++++++
 .../clientpositive/avro_decimal_old.q.out       |  60 +++++++++++++++++++
 .../hive/serde2/avro/AvroDeserializer.java      |   2 +-
 6 files changed, 114 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/data/files/dec_old.avro
----------------------------------------------------------------------
diff --git a/data/files/dec_old.avro b/data/files/dec_old.avro
new file mode 100644
index 0000000..bf87763
Binary files /dev/null and b/data/files/dec_old.avro differ

http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/ql/src/test/queries/clientnegative/avro_decimal.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/avro_decimal.q b/ql/src/test/queries/clientnegative/avro_decimal.q
new file mode 100644
index 0000000..538e687
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/avro_decimal.q
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS avro_dec;
+
+CREATE TABLE `avro_dec`(
+  `name` string COMMENT 'from deserializer',
+  `value` decimal(5,2) COMMENT 'from deserializer')
+COMMENT 'just drop the schema right into the HQL'
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
+TBLPROPERTIES (
+  'numFiles'='1',
+  'avro.schema.literal'='{\"namespace\":\"com.howdy\",\"name\":\"some_schema\",\"type\":\"record\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"value\",\"type\":{\"type\":\"bytes\",\"logicalType\":\"decimal\",\"precision\":"5",\"scale\":"2"}}]}'
+);
+

http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/ql/src/test/queries/clientpositive/avro_decimal_old.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/avro_decimal_old.q b/ql/src/test/queries/clientpositive/avro_decimal_old.q
new file mode 100644
index 0000000..9610c47
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/avro_decimal_old.q
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS avro_dec_old;
+
+CREATE TABLE `avro_dec_old`(
+  `name` string COMMENT 'from deserializer',
+  `value` decimal(4,1) COMMENT 'from deserializer')
+STORED AS AVRO;
+
+DESC avro_dec_old;
+
+LOAD DATA LOCAL INPATH '../../data/files/dec_old.avro' into TABLE avro_dec_old;
+
+select value from avro_dec_old;
+
+DROP TABLE avro_dec_old;

http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/ql/src/test/results/clientnegative/avro_decimal.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/avro_decimal.q.out b/ql/src/test/results/clientnegative/avro_decimal.q.out
new file mode 100644
index 0000000..9d00d6e
--- /dev/null
+++ b/ql/src/test/results/clientnegative/avro_decimal.q.out
@@ -0,0 +1,22 @@
+PREHOOK: query: DROP TABLE IF EXISTS avro_dec
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS avro_dec
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE `avro_dec`(
+  `name` string COMMENT 'from deserializer',
+  `value` decimal(5,2) COMMENT 'from deserializer')
+COMMENT 'just drop the schema right into the HQL'
+ROW FORMAT SERDE
+  'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
+STORED AS INPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat'
+OUTPUTFORMAT
+  'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat'
+TBLPROPERTIES (
+  'numFiles'='1',
+  'avro.schema.literal'='{\"namespace\":\"com.howdy\",\"name\":\"some_schema\",\"type\":\"record\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"value\",\"type\":{\"type\":\"bytes\",\"logicalType\":\"decimal\",\"precision\":"5",\"scale\":"2"}}]}'
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_dec
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.avro.AvroSerdeException Invalid precision or scale for decimal type)

http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/ql/src/test/results/clientpositive/avro_decimal_old.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/avro_decimal_old.q.out b/ql/src/test/results/clientpositive/avro_decimal_old.q.out
new file mode 100644
index 0000000..22efe39
--- /dev/null
+++ b/ql/src/test/results/clientpositive/avro_decimal_old.q.out
@@ -0,0 +1,60 @@
+PREHOOK: query: DROP TABLE IF EXISTS avro_dec_old
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: DROP TABLE IF EXISTS avro_dec_old
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: CREATE TABLE `avro_dec_old`(
+  `name` string COMMENT 'from deserializer',
+  `value` decimal(4,1) COMMENT 'from deserializer')
+STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@avro_dec_old
+POSTHOOK: query: CREATE TABLE `avro_dec_old`(
+  `name` string COMMENT 'from deserializer',
+  `value` decimal(4,1) COMMENT 'from deserializer')
+STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@avro_dec_old
+PREHOOK: query: DESC avro_dec_old
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@avro_dec_old
+POSTHOOK: query: DESC avro_dec_old
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@avro_dec_old
+name                	string              	from deserializer   
+value               	decimal(4,1)        	from deserializer   
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec_old.avro' into TABLE avro_dec_old
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@avro_dec_old
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec_old.avro' into TABLE avro_dec_old
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@avro_dec_old
+PREHOOK: query: select value from avro_dec_old
+PREHOOK: type: QUERY
+PREHOOK: Input: default@avro_dec_old
+#### A masked pattern was here ####
+POSTHOOK: query: select value from avro_dec_old
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@avro_dec_old
+#### A masked pattern was here ####
+234.8
+77.3
+55.7
+4.3
+6.0
+12.3
+33.3
+19.0
+3.2
+79.9
+PREHOOK: query: DROP TABLE avro_dec_old
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@avro_dec_old
+PREHOOK: Output: default@avro_dec_old
+POSTHOOK: query: DROP TABLE avro_dec_old
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@avro_dec_old
+POSTHOOK: Output: default@avro_dec_old

http://git-wip-us.apache.org/repos/asf/hive/blob/d4c1fdcf/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
----------------------------------------------------------------------
diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
index 4bba3d4..6165138 100644
--- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
+++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java
@@ -244,7 +244,7 @@ class AvroDeserializer {
 
       int scale = 0;
       try {
-        scale = fileSchema.getJsonProp(AvroSerDe.AVRO_PROP_SCALE).getIntValue();
+        scale = fileSchema.getJsonProp(AvroSerDe.AVRO_PROP_SCALE).asInt();
       } catch(Exception ex) {
         throw new AvroSerdeException("Failed to obtain scale value from file schema: " + fileSchema, ex);
       }
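
The one-line change above matters because, in the Jackson 1.x API that Avro exposes through Schema.getJsonProp(), getIntValue() returns the default 0 for a non-numeric node, while asInt() also coerces string-encoded numbers such as the "scale":"2" that files from older writers carry. A minimal standalone sketch of the difference (the class name is illustrative; assumes the org.codehaus.jackson 1.9 API in use at the time):

    import org.codehaus.jackson.JsonNode;
    import org.codehaus.jackson.map.ObjectMapper;

    public class ScaleCoercionSketch {
      public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Files written by older Avro versions store the scale as a JSON string.
        JsonNode oldStyle = mapper.readTree("{\"scale\":\"2\"}").get("scale");
        JsonNode newStyle = mapper.readTree("{\"scale\":2}").get("scale");
        System.out.println(oldStyle.getIntValue()); // 0 -- a text node has no int value
        System.out.println(oldStyle.asInt());       // 2 -- coerces the string form
        System.out.println(newStyle.asInt());       // 2
      }
    }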


[46/51] [abbrv] hive git commit: HIVE-11675 : make use of file footer PPD API in ETL strategy or separate strategy (Sergey Shelukhin, reviewed by Prasanth Jayachandran)

Posted by jd...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 80208c2..6d27f55 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 import org.apache.hadoop.hive.metastore.api.Index;
 import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
@@ -3485,7 +3486,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   }
 
   public Iterable<Map.Entry<Long, ByteBuffer>> getFileMetadata(
-      List<Long> fileIds, Configuration conf) throws HiveException {
+      List<Long> fileIds) throws HiveException {
     try {
       return getMSC().getFileMetadata(fileIds);
     } catch (TException e) {
@@ -3493,6 +3494,15 @@ private void constructOneLBLocationMap(FileStatus fSta,
     }
   }
 
+  public Iterable<Map.Entry<Long, MetadataPpdResult>> getFileMetadataByExpr(
+      List<Long> fileIds, ByteBuffer sarg, boolean doGetFooters) throws HiveException {
+    try {
+      return getMSC().getFileMetadataBySarg(fileIds, sarg, doGetFooters);
+    } catch (TException e) {
+      throw new HiveException(e);
+    }
+  }
+
   public void clearFileMetadata(List<Long> fileIds) throws HiveException {
     try {
       getMSC().clearFileMetadata(fileIds);
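
A hedged usage sketch for the getFileMetadataByExpr() wrapper added above; the file IDs are placeholders, and the thrift-style isSetMetadata() accessor on MetadataPpdResult is assumed:

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;

    public class FooterPpdSketch {
      static void demo(ByteBuffer serializedSarg) throws HiveException {
        // 101L/102L are hypothetical metastore file IDs; doGetFooters=true also
        // returns the cached ORC footers, not just the PPD verdicts.
        for (Map.Entry<Long, MetadataPpdResult> e : Hive.get()
            .getFileMetadataByExpr(Arrays.asList(101L, 102L), serializedSarg, true)) {
          // With doGetFooters=true, an unset metadata field means footer-level
          // PPD eliminated the file (mirroring the mock cache in the test below).
          System.out.println(e.getKey() + " eliminated=" + !e.getValue().isSetMetadata());
        }
      }
    }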

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
index f824e18..1a64f3a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
@@ -65,12 +65,12 @@ import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
 import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
 import org.apache.hadoop.hive.ql.io.AcidInputFormat;
 import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
-import org.apache.hadoop.hive.ql.io.AcidUtils;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.IOConstants;
 import org.apache.hadoop.hive.ql.io.InputFormatChecker;
+import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.Context;
 import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat.SplitStrategy;
 import org.apache.hadoop.hive.ql.io.sarg.ConvertAstToSearchArg;
 import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf;
@@ -1113,7 +1113,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
-            AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
+            fs.getFileStatus(new Path("/a/file")), null, true,
             new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     OrcSplit result = splitter.createSplit(0, 200, null);
     assertEquals(0, result.getStart());
@@ -1154,7 +1154,7 @@ public class TestInputOutputFormat {
     OrcInputFormat.Context context = new OrcInputFormat.Context(conf);
     OrcInputFormat.SplitGenerator splitter =
         new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
-            AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
+            fs.getFileStatus(new Path("/a/file")), null, true,
             new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     List<OrcSplit> results = splitter.call();
     OrcSplit result = results.get(0);
@@ -1177,7 +1177,7 @@ public class TestInputOutputFormat {
     HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0);
     context = new OrcInputFormat.Context(conf);
     splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
-      AcidUtils.createOriginalObj(null, fs.getFileStatus(new Path("/a/file"))), null, true,
+      fs.getFileStatus(new Path("/a/file")), null, true,
         new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null), null, true);
     results = splitter.call();
     for(int i=0; i < stripeSizes.length; ++i) {
@@ -2165,7 +2165,7 @@ public class TestInputOutputFormat {
         ugi.doAs(new PrivilegedExceptionAction<Void>() {
           @Override
           public Void run() throws Exception {
-            OrcInputFormat.generateSplitsInfo(conf, -1);
+            OrcInputFormat.generateSplitsInfo(conf, new Context(conf, -1, null));
             return null;
           }
         });
@@ -2184,7 +2184,7 @@ public class TestInputOutputFormat {
       }
       assertEquals(1, OrcInputFormat.Context.getCurrentThreadPoolSize());
       FileInputFormat.setInputPaths(conf, "mock:/ugi/2");
-      List<OrcSplit> splits = OrcInputFormat.generateSplitsInfo(conf, -1);
+      List<OrcSplit> splits = OrcInputFormat.generateSplitsInfo(conf, new Context(conf, -1, null));
       assertEquals(1, splits.size());
     } finally {
       MockFileSystem.clearGlobalFiles();

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
index 7a93b54..62a0ab0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java
@@ -18,18 +18,34 @@
 
 package org.apache.hadoop.hive.ql.io.orc;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
+import org.apache.hadoop.hive.metastore.hbase.MetadataStore;
 import org.apache.hadoop.hive.ql.exec.SerializationUtilities;
+import org.apache.hadoop.hive.ql.io.orc.ExternalCache.ExternalFooterCachesByConf;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore;
 import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
@@ -43,6 +59,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.FileInputFormat;
+import org.apache.hadoop.mapred.FileSplit;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
@@ -50,6 +67,8 @@ import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 
@@ -76,7 +95,7 @@ public class TestOrcSplitElimination {
 
   JobConf conf;
   FileSystem fs;
-  Path testFilePath;
+  Path testFilePath, testFilePath2;
 
   @Rule
   public TestName testCaseName = new TestName();
@@ -94,17 +113,15 @@ public class TestOrcSplitElimination {
     fs = FileSystem.getLocal(conf);
     testFilePath = new Path(workDir, "TestOrcFile." +
         testCaseName.getMethodName() + ".orc");
+    testFilePath2 = new Path(workDir, "TestOrcFile." +
+    testCaseName.getMethodName() + ".2.orc");
     fs.delete(testFilePath, false);
+    fs.delete(testFilePath2, false);
   }
 
   @Test
   public void testSplitEliminationSmallMaxSplit() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(AllTypesRow.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
+    ObjectInspector inspector = createIO();
     Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         100000, CompressionKind.NONE, 10000, 10000);
     writeData(writer);
@@ -116,13 +133,10 @@ public class TestOrcSplitElimination {
 
     GenericUDF udf = new GenericUDFOPEqualOrLessThan();
     List<ExprNodeDesc> childExpr = Lists.newArrayList();
-    ExprNodeColumnDesc col = new ExprNodeColumnDesc(Long.class, "userid", "T", false);
-    ExprNodeConstantDesc con = new ExprNodeConstantDesc(100);
-    childExpr.add(col);
-    childExpr.add(con);
-    ExprNodeGenericFuncDesc en = new ExprNodeGenericFuncDesc(inspector, udf, childExpr);
-    String sargStr = SerializationUtilities.serializeExpression(en);
-    conf.set("hive.io.filter.expr.serialized", sargStr);
+    ExprNodeConstantDesc con;
+    ExprNodeGenericFuncDesc en;
+    String sargStr;
+    createTestSarg(inspector, udf, childExpr);
     InputSplit[] splits = in.getSplits(conf, 1);
     assertEquals(5, splits.length);
 
@@ -177,12 +191,7 @@ public class TestOrcSplitElimination {
 
   @Test
   public void testSplitEliminationLargeMaxSplit() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(AllTypesRow.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
+    ObjectInspector inspector = createIO();
     Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         100000, CompressionKind.NONE, 10000, 10000);
     writeData(writer);
@@ -194,13 +203,10 @@ public class TestOrcSplitElimination {
 
     GenericUDF udf = new GenericUDFOPEqualOrLessThan();
     List<ExprNodeDesc> childExpr = Lists.newArrayList();
-    ExprNodeColumnDesc col = new ExprNodeColumnDesc(Long.class, "userid", "T", false);
-    ExprNodeConstantDesc con = new ExprNodeConstantDesc(100);
-    childExpr.add(col);
-    childExpr.add(con);
-    ExprNodeGenericFuncDesc en = new ExprNodeGenericFuncDesc(inspector, udf, childExpr);
-    String sargStr = SerializationUtilities.serializeExpression(en);
-    conf.set("hive.io.filter.expr.serialized", sargStr);
+    ExprNodeConstantDesc con;
+    ExprNodeGenericFuncDesc en;
+    String sargStr;
+    createTestSarg(inspector, udf, childExpr);
     InputSplit[] splits = in.getSplits(conf, 1);
     assertEquals(2, splits.length);
 
@@ -266,12 +272,7 @@ public class TestOrcSplitElimination {
 
   @Test
   public void testSplitEliminationComplexExpr() throws Exception {
-    ObjectInspector inspector;
-    synchronized (TestOrcFile.class) {
-      inspector = ObjectInspectorFactory
-          .getReflectionObjectInspector(AllTypesRow.class,
-              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
-    }
+    ObjectInspector inspector = createIO();
     Writer writer = OrcFile.createWriter(fs, testFilePath, conf, inspector,
         100000, CompressionKind.NONE, 10000, 10000);
     writeData(writer);
@@ -385,6 +386,342 @@ public class TestOrcSplitElimination {
     assertEquals(1, splits.length);
   }
 
+  private static class OrcInputFormatForTest extends OrcInputFormat {
+    public static void clearLocalCache() {
+      OrcInputFormat.Context.clearLocalCache();
+    }
+    static MockExternalCaches caches = new MockExternalCaches();
+    @Override
+    protected ExternalFooterCachesByConf createExternalCaches() {
+      return caches;
+    }
+  }
+
+  private static class MockExternalCaches
+    implements ExternalFooterCachesByConf, ExternalFooterCachesByConf.Cache, MetadataStore {
+    private static class MockItem {
+      ByteBuffer data;
+      ByteBuffer[] extraCols;
+      ByteBuffer[] extraData;
+
+      @Override
+      public String toString() {
+        return (data == null ? 0 : data.remaining()) + " bytes"
+            + (extraCols == null ? "" : ("; " + extraCols.length + " extras"));
+      }
+    }
+    private final Map<Long, MockItem> cache = new ConcurrentHashMap<>();
+    private final OrcFileMetadataHandler handler = new OrcFileMetadataHandler();
+    private final AtomicInteger putCount = new AtomicInteger(0),
+        getCount = new AtomicInteger(0), getHitCount = new AtomicInteger(0),
+        getByExprCount = new AtomicInteger(0), getHitByExprCount = new AtomicInteger();
+
+    public void resetCounts() {
+      getByExprCount.set(0);
+      getCount.set(0);
+      putCount.set(0);
+      getHitCount.set(0);
+      getHitByExprCount.set(0);
+    }
+
+    @Override
+    public Cache getCache(HiveConf conf) throws IOException {
+      handler.configure(conf, new PartitionExpressionForMetastore(), this);
+      return this;
+    }
+
+    @Override
+    public Iterator<Entry<Long, MetadataPpdResult>> getFileMetadataByExpr(
+        List<Long> fileIds, ByteBuffer sarg, boolean doGetFooters) throws HiveException {
+      getByExprCount.incrementAndGet();
+      ByteBuffer[] metadatas = new ByteBuffer[fileIds.size()];
+      ByteBuffer[] ppdResults = new ByteBuffer[fileIds.size()];
+      boolean[] eliminated = new boolean[fileIds.size()];
+      try {
+        byte[] bb = new byte[sarg.remaining()];
+        System.arraycopy(sarg.array(), sarg.arrayOffset(), bb, 0, sarg.remaining());
+        handler.getFileMetadataByExpr(fileIds, bb, metadatas, ppdResults, eliminated);
+      } catch (IOException e) {
+        throw new HiveException(e);
+      }
+      Map<Long, MetadataPpdResult> result = new HashMap<>();
+      for (int i = 0; i < metadatas.length; ++i) {
+        long fileId = fileIds.get(i);
+        ByteBuffer metadata = metadatas[i];
+        if (metadata == null) continue;
+        getHitByExprCount.incrementAndGet();
+        metadata = eliminated[i] ? null : metadata;
+        MetadataPpdResult mpr = new MetadataPpdResult();
+        ByteBuffer bitset = eliminated[i] ? null : ppdResults[i];
+        mpr.setMetadata(doGetFooters ? metadata : null);
+        mpr.setIncludeBitset(bitset);
+        result.put(fileId, mpr);
+      }
+      return result.entrySet().iterator();
+    }
+
+    @Override
+    public void clearFileMetadata(List<Long> fileIds) throws HiveException {
+      for (Long id : fileIds) {
+        cache.remove(id);
+      }
+    }
+
+    @Override
+    public Iterator<Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds)
+        throws HiveException {
+      getCount.incrementAndGet();
+      HashMap<Long, ByteBuffer> result = new HashMap<>();
+      for (Long id : fileIds) {
+        MockItem mi = cache.get(id);
+        if (mi == null) continue;
+        getHitCount.incrementAndGet();
+        result.put(id, mi.data);
+      }
+      return result.entrySet().iterator();
+    }
+
+    @Override
+    public void putFileMetadata(ArrayList<Long> fileIds,
+        ArrayList<ByteBuffer> values) throws HiveException {
+      putCount.incrementAndGet();
+      ByteBuffer[] addedCols = handler.createAddedCols();
+      ByteBuffer[][] addedVals = null;
+      if (addedCols != null) {
+        addedVals = handler.createAddedColVals(values);
+      }
+      try {
+        storeFileMetadata(fileIds, values, addedCols, addedVals);
+      } catch (IOException | InterruptedException e) {
+        throw new HiveException(e);
+      }
+    }
+
+    // MetadataStore
+    @Override
+    public void getFileMetadata(List<Long> fileIds, ByteBuffer[] result) throws IOException {
+      for (int i = 0; i < fileIds.size(); ++i) {
+        MockItem mi = cache.get(fileIds.get(i));
+        result[i] = (mi == null ? null : mi.data);
+      }
+    }
+
+    @Override
+    public void storeFileMetadata(List<Long> fileIds, List<ByteBuffer> metadataBuffers,
+        ByteBuffer[] addedCols, ByteBuffer[][] addedVals)
+            throws IOException, InterruptedException {
+      for (int i = 0; i < fileIds.size(); ++i) {
+        ByteBuffer value = (metadataBuffers != null) ? metadataBuffers.get(i) : null;
+        ByteBuffer[] av = addedVals == null ? null : addedVals[i];
+        storeFileMetadata(fileIds.get(i), value, addedCols, av);
+      }
+    }
+
+    @Override
+    public void storeFileMetadata(long fileId, ByteBuffer metadata,
+        ByteBuffer[] addedCols, ByteBuffer[] addedVals) throws IOException, InterruptedException {
+      if (metadata == null) {
+        cache.remove(metadata);
+        return;
+      }
+      MockItem mi = new MockItem();
+      mi.data = metadata;
+      if (addedVals != null) {
+        mi.extraCols = addedCols;
+        mi.extraData = addedVals;
+      }
+      cache.put(fileId, mi);
+    }
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestOrcSplitElimination.class);
+
+  @Test
+  public void testExternalFooterCache() throws Exception {
+    testFooterExternalCacheImpl(false);
+  }
+
+  @Test
+  public void testExternalFooterCachePpd() throws Exception {
+    testFooterExternalCacheImpl(true);
+  }
+
+  private final static class FsWithHash {
+    private FileSplit fs;
+    public FsWithHash(FileSplit fs) {
+      this.fs = fs;
+    }
+    @Override
+    public int hashCode() {
+      if (fs == null) return 0;
+      final int prime = 31;
+      int result = prime * 1 + fs.getPath().hashCode();
+      result = prime * result + Long.valueOf(fs.getStart()).hashCode();
+      return prime * result + Long.valueOf(fs.getLength()).hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) return true;
+      if (!(obj instanceof FsWithHash)) return false;
+      FsWithHash other = (FsWithHash)obj;
+      if ((fs == null) != (other.fs == null)) return false;
+      if (fs == null && other.fs == null) return true;
+      return fs.getStart() == other.fs.getStart() && fs.getLength() == other.fs.getLength()
+          && fs.getPath().equals(other.fs.getPath());
+    }
+  }
+
+  private void testFooterExternalCacheImpl(boolean isPpd) throws IOException {
+    ObjectInspector inspector = createIO();
+    writeFile(inspector, testFilePath);
+    writeFile(inspector, testFilePath2);
+
+    GenericUDF udf = new GenericUDFOPEqualOrLessThan();
+    List<ExprNodeDesc> childExpr = Lists.newArrayList();
+    createTestSarg(inspector, udf, childExpr);
+    setupExternalCacheConfig(isPpd, testFilePath + "," + testFilePath2);
+    // Get the base values w/o cache.
+    conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED.varname, false);
+    OrcInputFormatForTest.clearLocalCache();
+    OrcInputFormat in0 = new OrcInputFormat();
+    InputSplit[] originals = in0.getSplits(conf, -1);
+    assertEquals(10, originals.length);
+    HashSet<FsWithHash> originalHs = new HashSet<>();
+    for (InputSplit original : originals) {
+      originalHs.add(new FsWithHash((FileSplit)original));
+    }
+
+    // Populate the cache.
+    conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED.varname, true);
+    OrcInputFormatForTest in = new OrcInputFormatForTest();
+    OrcInputFormatForTest.clearLocalCache();
+    OrcInputFormatForTest.caches.resetCounts();
+    OrcInputFormatForTest.caches.cache.clear();
+    InputSplit[] splits = in.getSplits(conf, -1);
+    // Puts, gets, hits, unused, unused.
+    @SuppressWarnings("static-access")
+    AtomicInteger[] counts = { in.caches.putCount,
+        isPpd ? in.caches.getByExprCount : in.caches.getCount,
+        isPpd ? in.caches.getHitByExprCount : in.caches.getHitCount,
+        isPpd ? in.caches.getCount : in.caches.getByExprCount,
+        isPpd ? in.caches.getHitCount : in.caches.getHitByExprCount };
+
+    verifySplits(originalHs, splits);
+    verifyCallCounts(counts, 2, 2, 0);
+    assertEquals(2, OrcInputFormatForTest.caches.cache.size());
+
+    // Verify we can get from cache.
+    OrcInputFormatForTest.clearLocalCache();
+    OrcInputFormatForTest.caches.resetCounts();
+    splits = in.getSplits(conf, -1);
+    verifySplits(originalHs, splits);
+    verifyCallCounts(counts, 0, 2, 2);
+
+    // Verify ORC SARG still works.
+    OrcInputFormatForTest.clearLocalCache();
+    OrcInputFormatForTest.caches.resetCounts();
+    childExpr.set(1, new ExprNodeConstantDesc(5));
+    conf.set("hive.io.filter.expr.serialized", SerializationUtilities.serializeExpression(
+        new ExprNodeGenericFuncDesc(inspector, udf, childExpr)));
+    splits = in.getSplits(conf, -1);
+    InputSplit[] filtered = { originals[0], originals[4], originals[5], originals[9] };
+    originalHs = new HashSet<>();
+    for (InputSplit original : filtered) {
+      originalHs.add(new FsWithHash((FileSplit)original));
+    }
+    verifySplits(originalHs, splits);
+    verifyCallCounts(counts, 0, 2, 2);
+
+    // Verify corrupted cache value gets replaced.
+    OrcInputFormatForTest.clearLocalCache();
+    OrcInputFormatForTest.caches.resetCounts();
+    Map.Entry<Long, MockExternalCaches.MockItem> e =
+        OrcInputFormatForTest.caches.cache.entrySet().iterator().next();
+    Long key = e.getKey();
+    byte[] someData = new byte[8];
+    ByteBuffer toCorrupt = e.getValue().data;
+    System.arraycopy(toCorrupt.array(), toCorrupt.arrayOffset(), someData, 0, someData.length);
+    toCorrupt.putLong(0, 0L);
+    splits = in.getSplits(conf, -1);
+    verifySplits(originalHs, splits);
+    if (!isPpd) { // Recovery is not implemented yet for PPD path.
+      ByteBuffer restored = OrcInputFormatForTest.caches.cache.get(key).data;
+      byte[] newData = new byte[someData.length];
+      System.arraycopy(restored.array(), restored.arrayOffset(), newData, 0, newData.length);
+      assertArrayEquals(someData, newData);
+    }
+  }
+
+  private void verifyCallCounts(AtomicInteger[] counts, int puts, int gets, int hits) {
+    assertEquals("puts", puts, counts[0].get());
+    assertEquals("gets", gets, counts[1].get());
+    assertEquals("hits", hits, counts[2].get());
+    assertEquals("unused1", 0, counts[3].get());
+    assertEquals("unused2", 0, counts[4].get());
+  }
+
+  private void verifySplits(HashSet<FsWithHash> originalHs, InputSplit[] splits) {
+    if (originalHs.size() != splits.length) {
+      String s = "Expected [";
+      for (FsWithHash fwh : originalHs) {
+        s += toString(fwh.fs) + ", ";
+      }
+      s += "], actual [";
+      for (InputSplit fs : splits) {
+        s += toString((FileSplit)fs) + ", ";
+      }
+      fail(s + "]");
+    }
+    for (int i = 0; i < splits.length; ++i) {
+      FileSplit fs = (FileSplit)splits[i];
+      if (!originalHs.contains(new FsWithHash((FileSplit)splits[i]))) {
+        String s = " in [";
+        for (FsWithHash fwh : originalHs) {
+          s += toString(fwh.fs) + ", ";
+        }
+        fail("Cannot find " + toString(fs) + s);
+      }
+    }
+
+  }
+
+  private static String toString(FileSplit fs) {
+    return "{" + fs.getPath() + ", " + fs.getStart() + ", "  + fs.getLength() + "}";
+  }
+
+  private void setupExternalCacheConfig(boolean isPpd, String paths) {
+    FileInputFormat.setInputPaths(conf, paths);
+    conf.set(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL");
+    conf.setLong(HiveConf.ConfVars.MAPREDMINSPLITSIZE.varname, 1000);
+    conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, 5000);
+    conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD.varname, isPpd);
+    conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, isPpd);
+  }
+
+  private ObjectInspector createIO() {
+    synchronized (TestOrcFile.class) {
+      return ObjectInspectorFactory
+          .getReflectionObjectInspector(AllTypesRow.class,
+              ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
+    }
+  }
+
+  private void writeFile(ObjectInspector inspector, Path filePath) throws IOException {
+    Writer writer = OrcFile.createWriter(
+        fs, filePath, conf, inspector, 100000, CompressionKind.NONE, 10000, 10000);
+    writeData(writer);
+    writer.close();
+  }
+
+  private void createTestSarg(
+      ObjectInspector inspector, GenericUDF udf, List<ExprNodeDesc> childExpr) {
+    childExpr.add(new ExprNodeColumnDesc(Long.class, "userid", "T", false));
+    childExpr.add(new ExprNodeConstantDesc(100));
+    conf.set("hive.io.filter.expr.serialized", SerializationUtilities.serializeExpression(
+        new ExprNodeGenericFuncDesc(inspector, udf, childExpr)));
+  }
+
   private void writeData(Writer writer) throws IOException {
     for (int i = 0; i < 25000; i++) {
       if (i == 0) {
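
For reference, a minimal sketch of the knobs the new tests flip, mirroring setupExternalCacheConfig() in the diff above (the class and method names here are illustrative):

    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    import org.apache.hadoop.mapred.JobConf;

    public class EtlCacheConfigSketch {
      static JobConf withExternalFooterCache(JobConf conf, boolean usePpd) {
        // Force the ETL split strategy so footers are read during split generation.
        conf.set(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL");
        // Enable the metastore-backed footer cache exercised by the tests above.
        conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_ENABLED.varname, true);
        // Optionally push the SARG down into the cache lookup itself...
        conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD.varname, usePpd);
        // ...which only helps if a SARG is built in the first place.
        conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, usePpd);
        return conf;
      }
    }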

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
index dc71db4..469a3da 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/PredicateLeaf.java
@@ -99,5 +99,4 @@ public interface PredicateLeaf {
    *
    */
   public List<Object> getLiteralList();
-
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/868db42a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
index be5e67b..8c5bab2 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java
@@ -171,7 +171,6 @@ final class SearchArgumentImpl implements SearchArgument {
     }
   }
 
-
   private final List<PredicateLeaf> leaves;
   private final ExpressionTree expression;
 


[34/51] [abbrv] hive git commit: HIVE-4570 : Add more information to GetOperationStatus in Hive Server2 when query is still executing (Rajat Khandelwal, reviewed by Amareshwari)

Posted by jd...@apache.org.
HIVE-4570 : Add more information to GetOperationStatus in Hive Server2 when query is still executing (Rajat Khandelwal, reviewed by Amareshwari)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/de260b45
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/de260b45
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/de260b45

Branch: refs/heads/llap
Commit: de260b45d7334dbc8b0e728ed141582e9e74a380
Parents: ca165db
Author: Rajat Khandelwal <pr...@apache.org>
Authored: Mon Mar 14 12:31:34 2016 +0530
Committer: Amareshwari Sriramadasu <am...@apache.org>
Committed: Mon Mar 14 12:31:34 2016 +0530

----------------------------------------------------------------------
 .../cli/TestEmbeddedThriftBinaryCLIService.java |   1 +
 .../service/cli/session/TestQueryDisplay.java   |   7 +-
 .../java/org/apache/hadoop/hive/ql/Driver.java  |  11 +-
 .../org/apache/hadoop/hive/ql/QueryDisplay.java | 133 +++++---
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |  14 +
 .../org/apache/hadoop/hive/ql/exec/Task.java    |  75 +++--
 .../hadoop/hive/ql/exec/mr/ExecDriver.java      |   6 +
 .../hadoop/hive/ql/exec/mr/MapRedTask.java      |   8 +-
 .../hadoop/hive/ql/history/HiveHistory.java     |   2 +-
 service-rpc/if/TCLIService.thrift               |   9 +
 .../gen/thrift/gen-cpp/TCLIService_types.cpp    |  66 ++++
 .../src/gen/thrift/gen-cpp/TCLIService_types.h  |  28 +-
 .../rpc/thrift/TGetOperationStatusResp.java     | 312 ++++++++++++++++++-
 service-rpc/src/gen/thrift/gen-php/Types.php    |  69 ++++
 .../src/gen/thrift/gen-py/TCLIService/ttypes.py |  41 ++-
 .../gen/thrift/gen-rb/t_c_l_i_service_types.rb  |   8 +-
 .../org/apache/hive/tmpl/QueryProfileTmpl.jamon |  18 +-
 .../hive/service/cli/OperationStatus.java       |  20 +-
 .../hive/service/cli/operation/Operation.java   |  41 ++-
 .../service/cli/operation/SQLOperation.java     |  49 ++-
 .../service/cli/thrift/ThriftCLIService.java    |   3 +
 .../cli/thrift/ThriftCLIServiceClient.java      |   3 +-
 .../apache/hive/service/cli/CLIServiceTest.java | 104 ++++++-
 23 files changed, 902 insertions(+), 126 deletions(-)
----------------------------------------------------------------------
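
To see what the Jackson annotations on the new TaskDisplay buy, here is a minimal sketch (field values and the TaskSnapshot name are illustrative; assumes the org.codehaus.jackson 1.x API the patch imports):

    import org.codehaus.jackson.annotate.JsonIgnore;
    import org.codehaus.jackson.annotate.JsonWriteNullProperties;
    import org.codehaus.jackson.map.ObjectMapper;

    @JsonWriteNullProperties(false)       // null fields (e.g. endTime while running) are omitted
    public class TaskSnapshot {
      public String taskId = "Stage-1";
      public Long beginTime = 1458000000000L;
      public Long endTime = null;         // task still running -> dropped from the JSON
      @JsonIgnore
      public boolean requireLock = false; // never serialized

      public static void main(String[] args) throws Exception {
        // Prints e.g. {"taskId":"Stage-1","beginTime":1458000000000}
        System.out.println(new ObjectMapper().writeValueAsString(new TaskSnapshot()));
      }
    }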


http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java
index de66d9e..ac9b306 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java
@@ -37,6 +37,7 @@ public class TestEmbeddedThriftBinaryCLIService extends CLIServiceTest {
   public static void setUpBeforeClass() throws Exception {
     service = new EmbeddedThriftBinaryCLIService();
     HiveConf conf = new HiveConf();
+    conf.setBoolean("datanucleus.schema.autoCreateTables", true);
     conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict");
     service.init(conf);
     client = new ThriftCLIServiceClient(service);

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
index 9765b9d..418f71e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestQueryDisplay.java
@@ -23,15 +23,12 @@ import org.apache.hadoop.hive.ql.plan.api.StageType;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hive.service.cli.OperationHandle;
 import org.apache.hive.service.cli.SessionHandle;
-import org.apache.hive.service.cli.operation.ExecuteStatementOperation;
 import org.apache.hive.service.cli.operation.SQLOperationDisplay;
 import org.apache.hive.service.rpc.thrift.TProtocolVersion;
 import org.apache.hive.service.server.HiveServer2;
-import org.apache.hive.service.servlet.QueryProfileServlet;
 import org.apache.hive.tmpl.QueryProfileTmpl;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.StringWriter;
@@ -155,8 +152,8 @@ public class TestQueryDisplay {
     Assert.assertTrue(qDisplay1.getPerfLogStarts(QueryDisplay.Phase.COMPILATION).size() > 0);
     Assert.assertTrue(qDisplay1.getPerfLogEnds(QueryDisplay.Phase.COMPILATION).size() > 0);
 
-    Assert.assertEquals(qDisplay1.getTaskInfos().size(), 1);
-    QueryDisplay.TaskInfo tInfo1 = qDisplay1.getTaskInfos().get(0);
+    Assert.assertEquals(qDisplay1.getTaskDisplays().size(), 2);
+    QueryDisplay.TaskDisplay tInfo1 = qDisplay1.getTaskDisplays().get(1);
     Assert.assertEquals(tInfo1.getTaskId(), "Stage-0");
     Assert.assertEquals(tInfo1.getTaskType(), StageType.DDL);
     Assert.assertTrue(tInfo1.getBeginTime() > 0 && tInfo1.getBeginTime() <= System.currentTimeMillis());

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index f0fda05..7327a42 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -496,7 +496,7 @@ public class Driver implements CommandProcessor {
       schema = getSchema(sem, conf);
 
       plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
-        SessionState.get().getHiveOperation(), schema);
+        SessionState.get().getHiveOperation(), schema, queryDisplay);
 
       conf.setQueryString(queryStr);
 
@@ -1189,7 +1189,7 @@ public class Driver implements CommandProcessor {
   private int compileInternal(String command) {
     int ret;
     final ReentrantLock compileLock = tryAcquireCompileLock(isParallelEnabled,
-        command);
+      command);
     if (compileLock == null) {
       return ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode();
     }
@@ -1232,8 +1232,8 @@ public class Driver implements CommandProcessor {
     final ReentrantLock compileLock = isParallelEnabled ?
         SessionState.get().getCompileLock() : globalCompileLock;
     long maxCompileLockWaitTime = HiveConf.getTimeVar(
-          this.conf, ConfVars.HIVE_SERVER2_COMPILE_LOCK_TIMEOUT,
-          TimeUnit.SECONDS);
+      this.conf, ConfVars.HIVE_SERVER2_COMPILE_LOCK_TIMEOUT,
+      TimeUnit.SECONDS);
     if (maxCompileLockWaitTime > 0) {
       try {
         if (LOG.isDebugEnabled()) {
@@ -1576,7 +1576,6 @@ public class Driver implements CommandProcessor {
         // Launch upto maxthreads tasks
         Task<? extends Serializable> task;
         while ((task = driverCxt.getRunnable(maxthreads)) != null) {
-          queryDisplay.addTask(task);
           TaskRunner runner = launchTask(task, queryId, noName, jobname, jobs, driverCxt);
           if (!runner.isRunning()) {
             break;
@@ -1589,7 +1588,7 @@ public class Driver implements CommandProcessor {
           continue;
         }
         hookContext.addCompleteTask(tskRun);
-        queryDisplay.setTaskCompleted(tskRun.getTask().getId(), tskRun.getTaskResult());
+        queryDisplay.setTaskResult(tskRun.getTask().getId(), tskRun.getTaskResult());
 
         Task<? extends Serializable> tsk = tskRun.getTask();
         TaskResult result = tskRun.getTaskResult();

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
index c87c825..467dab6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryDisplay.java
@@ -22,11 +22,12 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.exec.TaskResult;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
 
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
+import java.io.Serializable;
+import java.util.*;
+
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.annotate.JsonWriteNullProperties;
+import org.codehaus.jackson.annotate.JsonIgnore;
 
 /**
  * Some limited query information to save for WebUI.
@@ -41,39 +42,56 @@ public class QueryDisplay {
   private String errorMessage;
   private String queryId;
 
-  private final Map<Phase, Map<String, Long>> hmsTimingMap = new HashMap();
-  private final Map<Phase, Map<String, Long>> perfLogStartMap = new HashMap();
-  private final Map<Phase, Map<String, Long>> perfLogEndMap = new HashMap();
+  private final Map<Phase, Map<String, Long>> hmsTimingMap = new HashMap<Phase, Map<String, Long>>();
+  private final Map<Phase, Map<String, Long>> perfLogStartMap = new HashMap<Phase, Map<String, Long>>();
+  private final Map<Phase, Map<String, Long>> perfLogEndMap = new HashMap<Phase, Map<String, Long>>();
+
+  private final LinkedHashMap<String, TaskDisplay> tasks = new LinkedHashMap<String, TaskDisplay>();
 
-  private final LinkedHashMap<String, TaskInfo> tasks = new LinkedHashMap<String, TaskInfo>();
+  public synchronized <T extends Serializable> void updateTaskStatus(Task<T> tTask) {
+    if (!tasks.containsKey(tTask.getId())) {
+      tasks.put(tTask.getId(), new TaskDisplay(tTask));
+    }
+    tasks.get(tTask.getId()).updateStatus(tTask);
+  }
 
   //Inner classes
-  public static enum Phase {
+  public enum Phase {
     COMPILATION,
     EXECUTION,
   }
 
-  public static class TaskInfo {
+  @JsonWriteNullProperties(false)
+  @JsonIgnoreProperties(ignoreUnknown = true)
+  public static class TaskDisplay {
+
     private Integer returnVal;  //if set, determines that task is complete.
     private String errorMsg;
-    private long endTime;
 
-    final long beginTime;
-    final String taskId;
-    final StageType taskType;
-    final String name;
-    final boolean requireLock;
-    final boolean retryIfFail;
+    private Long beginTime;
+    private Long endTime;
+
+    private String taskId;
+    private String taskExternalHandle;
+
+    public Task.TaskState taskState;
+    private StageType taskType;
+    private String name;
+    private boolean requireLock;
+    private boolean retryIfFail;
+    // required for jackson
+    public TaskDisplay() {
 
-    public TaskInfo (Task task) {
-      beginTime = System.currentTimeMillis();
+    }
+    public TaskDisplay(Task task) {
       taskId = task.getId();
+      taskExternalHandle = task.getExternalHandle();
       taskType = task.getType();
       name = task.getName();
       requireLock = task.requireLock();
       retryIfFail = task.ifRetryCmdWhenFail();
     }
-
+    @JsonIgnore
     public synchronized String getStatus() {
       if (returnVal == null) {
         return "Running";
@@ -84,67 +102,82 @@ public class QueryDisplay {
       }
     }
 
-    public synchronized long getElapsedTime() {
-      if (endTime == 0) {
+    public synchronized Long getElapsedTime() {
+      if (endTime == null) {
+        if (beginTime == null) {
+          return null;
+        }
         return System.currentTimeMillis() - beginTime;
       } else {
         return endTime - beginTime;
       }
     }
 
+    public synchronized Integer getReturnValue() {
+      return returnVal;
+    }
+
     public synchronized String getErrorMsg() {
       return errorMsg;
     }
 
-    public synchronized long getEndTime() {
-      return endTime;
+    public synchronized Long getBeginTime() {
+      return beginTime;
     }
 
-    //Following methods do not need to be synchronized, because they are final fields.
-    public long getBeginTime() {
-      return beginTime;
+    public synchronized Long getEndTime() {
+      return endTime;
     }
 
-    public String getTaskId() {
+    public synchronized String getTaskId() {
       return taskId;
     }
 
-    public StageType getTaskType() {
+    public synchronized StageType getTaskType() {
       return taskType;
     }
 
-    public String getName() {
+    public synchronized String getName() {
       return name;
     }
-
-    public boolean isRequireLock() {
+    @JsonIgnore
+    public synchronized boolean isRequireLock() {
       return requireLock;
     }
-
-    public boolean isRetryIfFail() {
+    @JsonIgnore
+    public synchronized boolean isRetryIfFail() {
       return retryIfFail;
     }
-  }
 
-  public synchronized void addTask(Task task) {
-    tasks.put(task.getId(), new TaskInfo(task));
-  }
+    public synchronized String getExternalHandle() {
+      return taskExternalHandle;
+    }
 
-  public synchronized void setTaskCompleted(String taskId, TaskResult result) {
-    TaskInfo taskInfo = tasks.get(taskId);
-    if (taskInfo != null) {
-      taskInfo.returnVal = result.getExitVal();
+    public synchronized <T extends Serializable> void updateStatus(Task<T> tTask) {
+      this.taskState = tTask.getTaskState();
+      switch(taskState) {
+        case RUNNING:
+          beginTime = System.currentTimeMillis();
+          break;
+        case FINISHED:
+          endTime = System.currentTimeMillis();
+          break;
+      }
+    }
+  }
+  public synchronized void setTaskResult(String taskId, TaskResult result) {
+    TaskDisplay taskDisplay = tasks.get(taskId);
+    if (taskDisplay != null) {
+      taskDisplay.returnVal = result.getExitVal();
       if (result.getTaskError() != null) {
-        taskInfo.errorMsg = result.getTaskError().toString();
+        taskDisplay.errorMsg = result.getTaskError().toString();
       }
-      taskInfo.endTime = System.currentTimeMillis();
     }
   }
-
-  public synchronized List<TaskInfo> getTaskInfos() {
-    List<TaskInfo> taskInfos = new ArrayList<TaskInfo>();
-    taskInfos.addAll(tasks.values());
-    return taskInfos;
+  public synchronized List<TaskDisplay> getTaskDisplays() {
+    List<TaskDisplay> taskDisplays = new ArrayList<TaskDisplay>();
+    taskDisplays.addAll(tasks.values());
+    return taskDisplays;
   }
 
   public synchronized void setQueryStr(String queryStr) {

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
index 4933b34..ef0923d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
@@ -114,11 +114,25 @@ public class QueryPlan implements Serializable {
 
   public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId,
                    HiveOperation operation, Schema resultSchema) {
+    this(queryString, sem, startTime, queryId, operation, resultSchema, null);
+  }
+  public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId,
+                  HiveOperation operation, Schema resultSchema, QueryDisplay queryDisplay) {
     this.queryString = queryString;
 
     rootTasks = new ArrayList<Task<? extends Serializable>>(sem.getAllRootTasks());
     reducerTimeStatsPerJobList = new ArrayList<ReducerTimeStatsPerJob>();
     fetchTask = sem.getFetchTask();
+    if (queryDisplay != null) {
+      if (fetchTask != null) {
+        fetchTask.setQueryDisplay(queryDisplay);
+      }
+      if (rootTasks != null) {
+        for (Task t : rootTasks) {
+          t.setQueryDisplay(queryDisplay);
+        }
+      }
+    }
     // Note that inputs and outputs can be changed when the query gets executed
     inputs = sem.getAllInputs();
     outputs = sem.getAllOutputs();

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index e199e5e..6c677f5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -30,9 +30,7 @@ import java.util.List;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.CompilationOpContext;
-import org.apache.hadoop.hive.ql.DriverContext;
-import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.*;
 import org.apache.hadoop.hive.ql.lib.Node;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -52,10 +50,6 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   private static final long serialVersionUID = 1L;
   public transient HashMap<String, Long> taskCounters;
   public transient TaskHandle taskHandle;
-  protected transient boolean started;
-  protected transient boolean initialized;
-  protected transient boolean isdone;
-  protected transient boolean queued;
   protected transient HiveConf conf;
   protected transient LogHelper console;
   protected transient QueryPlan queryPlan;
@@ -81,18 +75,32 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   // created in case the mapjoin failed.
   public static final int MAPJOIN_ONLY_NOBACKUP = 7;
   public static final int CONVERTED_SORTMERGEJOIN = 8;
-
+  public QueryDisplay queryDisplay = null;
   // Descendants tasks who subscribe feeds from this task
   protected transient List<Task<? extends Serializable>> feedSubscribers;
 
   protected String id;
   protected T work;
-
+  private TaskState taskState = TaskState.CREATED;
   private transient boolean fetchSource;
 
-  public static enum FeedType {
+  public enum FeedType {
     DYNAMIC_PARTITIONS, // list of dynamic partitions
   }
+  public enum TaskState {
+    // Task data structures have been initialized
+    INITIALIZED,
+    // Task has been queued for execution by the driver
+    QUEUED,
+    // Task is currently running
+    RUNNING,
+    // Task has completed
+    FINISHED,
+    // Task is just created
+    CREATED,
+    // Task state is unknown
+    UNKNOWN
+  }
 
   // Bean methods
 
@@ -108,10 +116,6 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   private Throwable exception;
 
   public Task() {
-    isdone = false;
-    started = false;
-    initialized = false;
-    queued = false;
     this.taskCounters = new HashMap<String, Long>();
     taskTag = Task.NO_TAG;
   }
@@ -123,13 +127,25 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext,
       CompilationOpContext opContext) {
     this.queryPlan = queryPlan;
-    isdone = false;
-    started = false;
     setInitialized();
     this.conf = conf;
     this.driverContext = driverContext;
     console = new LogHelper(LOG);
   }
+  public void setQueryDisplay(QueryDisplay queryDisplay) {
+    this.queryDisplay = queryDisplay;
+  }
+
+  private void updateStatusInQueryDisplay() {
+    if (queryDisplay != null) {
+      queryDisplay.updateTaskStatus(this);
+    }
+  }
+
+  private void setState(TaskState state) {
+    this.taskState = state;
+    updateStatusInQueryDisplay();
+  }
 
   protected Hive getHive() {
     try {
@@ -323,37 +339,36 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
       }
     }
   }
-
   public void setStarted() {
-    this.started = true;
+    setState(TaskState.RUNNING);
   }
 
   public boolean started() {
-    return started;
+    return taskState == TaskState.RUNNING;
   }
 
   public boolean done() {
-    return isdone;
+    return taskState == TaskState.FINISHED;
   }
 
   public void setDone() {
-    isdone = true;
+    setState(TaskState.FINISHED);
   }
 
   public void setQueued() {
-    queued = true;
+    setState(TaskState.QUEUED);
   }
 
   public boolean getQueued() {
-    return queued;
+    return taskState == TaskState.QUEUED;
   }
 
   public void setInitialized() {
-    initialized = true;
+    setState(TaskState.INITIALIZED);
   }
 
   public boolean getInitialized() {
-    return initialized;
+    return taskState == TaskState.INITIALIZED;
   }
 
   public boolean isRunnable() {
@@ -391,6 +406,14 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
     return id;
   }
 
+  public String getExternalHandle() {
+    return null;
+  }
+
+  public TaskState getTaskState() {
+    return taskState;
+  }
+
   public boolean isMapRedTask() {
     return false;
   }
@@ -572,4 +595,6 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
   public boolean equals(Object obj) {
     return toString().equals(String.valueOf(obj));
   }
+
+
 }
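
The Task.java change above folds the four transient booleans (started,
initialized, isdone, queued) into one TaskState field, so every transition
goes through setState() and can be mirrored into QueryDisplay. The idea in
isolation (a hypothetical stand-alone class, not the real Task):

public class TaskStateSketch {
  enum TaskState { CREATED, INITIALIZED, QUEUED, RUNNING, FINISHED, UNKNOWN }

  private TaskState state = TaskState.CREATED;

  private void setState(TaskState next) {
    state = next;
    // Stands in for updateStatusInQueryDisplay() in the real patch.
    System.out.println("transition -> " + next);
  }

  public void setQueued()  { setState(TaskState.QUEUED); }
  public void setStarted() { setState(TaskState.RUNNING); }
  public void setDone()    { setState(TaskState.FINISHED); }

  public boolean started() { return state == TaskState.RUNNING; }
  public boolean done()    { return state == TaskState.FINISHED; }

  public static void main(String[] args) {
    TaskStateSketch t = new TaskStateSketch();
    t.setQueued();
    t.setStarted();
    t.setDone();
    System.out.println("done = " + t.done());  // true
  }
}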

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index ce020a5..d164859 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -430,6 +430,7 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
 
       // Finally SUBMIT the JOB!
       rj = jc.submitJob(job);
+      this.jobID = rj.getJobID();
 
       returnVal = jobExecHelper.progress(rj, jc);
       success = (returnVal == 0);
@@ -849,5 +850,10 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
       rj = null;
     }
   }
+
+  @Override
+  public String getExternalHandle() {
+    return this.jobID;
+  }
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
index 5bc3d9e..310356c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java
@@ -363,25 +363,25 @@ public class MapRedTask extends ExecDriver implements Serializable {
   @Override
   public boolean mapStarted() {
     boolean b = super.mapStarted();
-    return runningViaChild ? isdone : b;
+    return runningViaChild ? done() : b;
   }
 
   @Override
   public boolean reduceStarted() {
     boolean b = super.reduceStarted();
-    return runningViaChild ? isdone : b;
+    return runningViaChild ? done() : b;
   }
 
   @Override
   public boolean mapDone() {
     boolean b = super.mapDone();
-    return runningViaChild ? isdone : b;
+    return runningViaChild ? done() : b;
   }
 
   @Override
   public boolean reduceDone() {
     boolean b = super.reduceDone();
-    return runningViaChild ? isdone : b;
+    return runningViaChild ? done() : b;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
index 45cd533..687f551 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistory.java
@@ -109,7 +109,7 @@ public interface HiveHistory {
   };
 
   /**
-   * TaskInfo.
+   * TaskDisplay.
    *
    */
   public static class TaskInfo extends Info {

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/if/TCLIService.thrift
----------------------------------------------------------------------
diff --git a/service-rpc/if/TCLIService.thrift b/service-rpc/if/TCLIService.thrift
index 0aa9d13..aa28b6e 100644
--- a/service-rpc/if/TCLIService.thrift
+++ b/service-rpc/if/TCLIService.thrift
@@ -977,6 +977,15 @@ struct TGetOperationStatusResp {
 
   // Error message
   5: optional string errorMessage
+
+  // Status of the operation's sub-tasks, serialized as a JSON list
+  6: optional string taskStatus
+
+  // Time (epoch milliseconds) at which the operation started
+  7: optional i64 operationStarted
+  // Time (epoch milliseconds) at which the operation completed
+  8: optional i64 operationCompleted
+
 }
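
All three additions are optional fields, so pre-existing clients are
unaffected and new clients should test the isSet flags before reading. A
hedged sketch of a caller built on the generated Java bindings shown further
down (the Thrift client and operation handle are assumed to already exist):

import org.apache.hive.service.rpc.thrift.TCLIService;
import org.apache.hive.service.rpc.thrift.TGetOperationStatusReq;
import org.apache.hive.service.rpc.thrift.TGetOperationStatusResp;
import org.apache.hive.service.rpc.thrift.TOperationHandle;

public class OperationStatusProbe {
  // Prints the new progress fields if the server populated them.
  public static void report(TCLIService.Iface client, TOperationHandle handle)
      throws Exception {
    TGetOperationStatusResp resp =
        client.GetOperationStatus(new TGetOperationStatusReq(handle));
    if (resp.isSetTaskStatus()) {
      System.out.println("task status (JSON): " + resp.getTaskStatus());
    }
    if (resp.isSetOperationStarted() && resp.isSetOperationCompleted()) {
      System.out.println("wall time: "
          + (resp.getOperationCompleted() - resp.getOperationStarted()) + " ms");
    }
  }
}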
 
 

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
index e62e8b7..3a27a60 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp
@@ -7612,6 +7612,21 @@ void TGetOperationStatusResp::__set_errorMessage(const std::string& val) {
 __isset.errorMessage = true;
 }
 
+void TGetOperationStatusResp::__set_taskStatus(const std::string& val) {
+  this->taskStatus = val;
+__isset.taskStatus = true;
+}
+
+void TGetOperationStatusResp::__set_operationStarted(const int64_t val) {
+  this->operationStarted = val;
+__isset.operationStarted = true;
+}
+
+void TGetOperationStatusResp::__set_operationCompleted(const int64_t val) {
+  this->operationCompleted = val;
+__isset.operationCompleted = true;
+}
+
 uint32_t TGetOperationStatusResp::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -7676,6 +7691,30 @@ uint32_t TGetOperationStatusResp::read(::apache::thrift::protocol::TProtocol* ip
           xfer += iprot->skip(ftype);
         }
         break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->taskStatus);
+          this->__isset.taskStatus = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->operationStarted);
+          this->__isset.operationStarted = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->operationCompleted);
+          this->__isset.operationCompleted = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -7719,6 +7758,21 @@ uint32_t TGetOperationStatusResp::write(::apache::thrift::protocol::TProtocol* o
     xfer += oprot->writeString(this->errorMessage);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.taskStatus) {
+    xfer += oprot->writeFieldBegin("taskStatus", ::apache::thrift::protocol::T_STRING, 6);
+    xfer += oprot->writeString(this->taskStatus);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.operationStarted) {
+    xfer += oprot->writeFieldBegin("operationStarted", ::apache::thrift::protocol::T_I64, 7);
+    xfer += oprot->writeI64(this->operationStarted);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.operationCompleted) {
+    xfer += oprot->writeFieldBegin("operationCompleted", ::apache::thrift::protocol::T_I64, 8);
+    xfer += oprot->writeI64(this->operationCompleted);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -7731,6 +7785,9 @@ void swap(TGetOperationStatusResp &a, TGetOperationStatusResp &b) {
   swap(a.sqlState, b.sqlState);
   swap(a.errorCode, b.errorCode);
   swap(a.errorMessage, b.errorMessage);
+  swap(a.taskStatus, b.taskStatus);
+  swap(a.operationStarted, b.operationStarted);
+  swap(a.operationCompleted, b.operationCompleted);
   swap(a.__isset, b.__isset);
 }
 
@@ -7740,6 +7797,9 @@ TGetOperationStatusResp::TGetOperationStatusResp(const TGetOperationStatusResp&
   sqlState = other263.sqlState;
   errorCode = other263.errorCode;
   errorMessage = other263.errorMessage;
+  taskStatus = other263.taskStatus;
+  operationStarted = other263.operationStarted;
+  operationCompleted = other263.operationCompleted;
   __isset = other263.__isset;
 }
 TGetOperationStatusResp& TGetOperationStatusResp::operator=(const TGetOperationStatusResp& other264) {
@@ -7748,6 +7808,9 @@ TGetOperationStatusResp& TGetOperationStatusResp::operator=(const TGetOperationS
   sqlState = other264.sqlState;
   errorCode = other264.errorCode;
   errorMessage = other264.errorMessage;
+  taskStatus = other264.taskStatus;
+  operationStarted = other264.operationStarted;
+  operationCompleted = other264.operationCompleted;
   __isset = other264.__isset;
   return *this;
 }
@@ -7759,6 +7822,9 @@ void TGetOperationStatusResp::printTo(std::ostream& out) const {
   out << ", " << "sqlState="; (__isset.sqlState ? (out << to_string(sqlState)) : (out << "<null>"));
   out << ", " << "errorCode="; (__isset.errorCode ? (out << to_string(errorCode)) : (out << "<null>"));
   out << ", " << "errorMessage="; (__isset.errorMessage ? (out << to_string(errorMessage)) : (out << "<null>"));
+  out << ", " << "taskStatus="; (__isset.taskStatus ? (out << to_string(taskStatus)) : (out << "<null>"));
+  out << ", " << "operationStarted="; (__isset.operationStarted ? (out << to_string(operationStarted)) : (out << "<null>"));
+  out << ", " << "operationCompleted="; (__isset.operationCompleted ? (out << to_string(operationCompleted)) : (out << "<null>"));
   out << ")";
 }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
index 5364293..7f1d9dd 100644
--- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
+++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h
@@ -3408,11 +3408,14 @@ inline std::ostream& operator<<(std::ostream& out, const TGetOperationStatusReq&
 }
 
 typedef struct _TGetOperationStatusResp__isset {
-  _TGetOperationStatusResp__isset() : operationState(false), sqlState(false), errorCode(false), errorMessage(false) {}
+  _TGetOperationStatusResp__isset() : operationState(false), sqlState(false), errorCode(false), errorMessage(false), taskStatus(false), operationStarted(false), operationCompleted(false) {}
   bool operationState :1;
   bool sqlState :1;
   bool errorCode :1;
   bool errorMessage :1;
+  bool taskStatus :1;
+  bool operationStarted :1;
+  bool operationCompleted :1;
 } _TGetOperationStatusResp__isset;
 
 class TGetOperationStatusResp {
@@ -3420,7 +3423,7 @@ class TGetOperationStatusResp {
 
   TGetOperationStatusResp(const TGetOperationStatusResp&);
   TGetOperationStatusResp& operator=(const TGetOperationStatusResp&);
-  TGetOperationStatusResp() : operationState((TOperationState::type)0), sqlState(), errorCode(0), errorMessage() {
+  TGetOperationStatusResp() : operationState((TOperationState::type)0), sqlState(), errorCode(0), errorMessage(), taskStatus(), operationStarted(0), operationCompleted(0) {
   }
 
   virtual ~TGetOperationStatusResp() throw();
@@ -3429,6 +3432,9 @@ class TGetOperationStatusResp {
   std::string sqlState;
   int32_t errorCode;
   std::string errorMessage;
+  std::string taskStatus;
+  int64_t operationStarted;
+  int64_t operationCompleted;
 
   _TGetOperationStatusResp__isset __isset;
 
@@ -3442,6 +3448,12 @@ class TGetOperationStatusResp {
 
   void __set_errorMessage(const std::string& val);
 
+  void __set_taskStatus(const std::string& val);
+
+  void __set_operationStarted(const int64_t val);
+
+  void __set_operationCompleted(const int64_t val);
+
   bool operator == (const TGetOperationStatusResp & rhs) const
   {
     if (!(status == rhs.status))
@@ -3462,6 +3474,18 @@ class TGetOperationStatusResp {
       return false;
     else if (__isset.errorMessage && !(errorMessage == rhs.errorMessage))
       return false;
+    if (__isset.taskStatus != rhs.__isset.taskStatus)
+      return false;
+    else if (__isset.taskStatus && !(taskStatus == rhs.taskStatus))
+      return false;
+    if (__isset.operationStarted != rhs.__isset.operationStarted)
+      return false;
+    else if (__isset.operationStarted && !(operationStarted == rhs.operationStarted))
+      return false;
+    if (__isset.operationCompleted != rhs.__isset.operationCompleted)
+      return false;
+    else if (__isset.operationCompleted && !(operationCompleted == rhs.operationCompleted))
+      return false;
     return true;
   }
   bool operator != (const TGetOperationStatusResp &rhs) const {

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java
index a7a8ebc..3049280 100644
--- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java
+++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java
@@ -43,6 +43,9 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
   private static final org.apache.thrift.protocol.TField SQL_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("sqlState", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)4);
   private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField TASK_STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("taskStatus", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField OPERATION_STARTED_FIELD_DESC = new org.apache.thrift.protocol.TField("operationStarted", org.apache.thrift.protocol.TType.I64, (short)7);
+  private static final org.apache.thrift.protocol.TField OPERATION_COMPLETED_FIELD_DESC = new org.apache.thrift.protocol.TField("operationCompleted", org.apache.thrift.protocol.TType.I64, (short)8);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +58,9 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
   private String sqlState; // optional
   private int errorCode; // optional
   private String errorMessage; // optional
+  private String taskStatus; // optional
+  private long operationStarted; // optional
+  private long operationCompleted; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -66,7 +72,10 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     OPERATION_STATE((short)2, "operationState"),
     SQL_STATE((short)3, "sqlState"),
     ERROR_CODE((short)4, "errorCode"),
-    ERROR_MESSAGE((short)5, "errorMessage");
+    ERROR_MESSAGE((short)5, "errorMessage"),
+    TASK_STATUS((short)6, "taskStatus"),
+    OPERATION_STARTED((short)7, "operationStarted"),
+    OPERATION_COMPLETED((short)8, "operationCompleted");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -91,6 +100,12 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
           return ERROR_CODE;
         case 5: // ERROR_MESSAGE
           return ERROR_MESSAGE;
+        case 6: // TASK_STATUS
+          return TASK_STATUS;
+        case 7: // OPERATION_STARTED
+          return OPERATION_STARTED;
+        case 8: // OPERATION_COMPLETED
+          return OPERATION_COMPLETED;
         default:
           return null;
       }
@@ -132,8 +147,10 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
 
   // isset id assignments
   private static final int __ERRORCODE_ISSET_ID = 0;
+  private static final int __OPERATIONSTARTED_ISSET_ID = 1;
+  private static final int __OPERATIONCOMPLETED_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.OPERATION_STATE,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE};
+  private static final _Fields optionals[] = {_Fields.OPERATION_STATE,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE,_Fields.TASK_STATUS,_Fields.OPERATION_STARTED,_Fields.OPERATION_COMPLETED};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -147,6 +164,12 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
     tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TASK_STATUS, new org.apache.thrift.meta_data.FieldMetaData("taskStatus", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OPERATION_STARTED, new org.apache.thrift.meta_data.FieldMetaData("operationStarted", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.OPERATION_COMPLETED, new org.apache.thrift.meta_data.FieldMetaData("operationCompleted", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetOperationStatusResp.class, metaDataMap);
   }
@@ -179,6 +202,11 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     if (other.isSetErrorMessage()) {
       this.errorMessage = other.errorMessage;
     }
+    if (other.isSetTaskStatus()) {
+      this.taskStatus = other.taskStatus;
+    }
+    this.operationStarted = other.operationStarted;
+    this.operationCompleted = other.operationCompleted;
   }
 
   public TGetOperationStatusResp deepCopy() {
@@ -193,6 +221,11 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     setErrorCodeIsSet(false);
     this.errorCode = 0;
     this.errorMessage = null;
+    this.taskStatus = null;
+    setOperationStartedIsSet(false);
+    this.operationStarted = 0;
+    setOperationCompletedIsSet(false);
+    this.operationCompleted = 0;
   }
 
   public TStatus getStatus() {
@@ -317,6 +350,73 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     }
   }
 
+  public String getTaskStatus() {
+    return this.taskStatus;
+  }
+
+  public void setTaskStatus(String taskStatus) {
+    this.taskStatus = taskStatus;
+  }
+
+  public void unsetTaskStatus() {
+    this.taskStatus = null;
+  }
+
+  /** Returns true if field taskStatus is set (has been assigned a value) and false otherwise */
+  public boolean isSetTaskStatus() {
+    return this.taskStatus != null;
+  }
+
+  public void setTaskStatusIsSet(boolean value) {
+    if (!value) {
+      this.taskStatus = null;
+    }
+  }
+
+  public long getOperationStarted() {
+    return this.operationStarted;
+  }
+
+  public void setOperationStarted(long operationStarted) {
+    this.operationStarted = operationStarted;
+    setOperationStartedIsSet(true);
+  }
+
+  public void unsetOperationStarted() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID);
+  }
+
+  /** Returns true if field operationStarted is set (has been assigned a value) and false otherwise */
+  public boolean isSetOperationStarted() {
+    return EncodingUtils.testBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID);
+  }
+
+  public void setOperationStartedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID, value);
+  }
+
+  public long getOperationCompleted() {
+    return this.operationCompleted;
+  }
+
+  public void setOperationCompleted(long operationCompleted) {
+    this.operationCompleted = operationCompleted;
+    setOperationCompletedIsSet(true);
+  }
+
+  public void unsetOperationCompleted() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID);
+  }
+
+  /** Returns true if field operationCompleted is set (has been assigned a value) and false otherwise */
+  public boolean isSetOperationCompleted() {
+    return EncodingUtils.testBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID);
+  }
+
+  public void setOperationCompletedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case STATUS:
@@ -359,6 +459,30 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       }
       break;
 
+    case TASK_STATUS:
+      if (value == null) {
+        unsetTaskStatus();
+      } else {
+        setTaskStatus((String)value);
+      }
+      break;
+
+    case OPERATION_STARTED:
+      if (value == null) {
+        unsetOperationStarted();
+      } else {
+        setOperationStarted((Long)value);
+      }
+      break;
+
+    case OPERATION_COMPLETED:
+      if (value == null) {
+        unsetOperationCompleted();
+      } else {
+        setOperationCompleted((Long)value);
+      }
+      break;
+
     }
   }
 
@@ -379,6 +503,15 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     case ERROR_MESSAGE:
       return getErrorMessage();
 
+    case TASK_STATUS:
+      return getTaskStatus();
+
+    case OPERATION_STARTED:
+      return getOperationStarted();
+
+    case OPERATION_COMPLETED:
+      return getOperationCompleted();
+
     }
     throw new IllegalStateException();
   }
@@ -400,6 +533,12 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       return isSetErrorCode();
     case ERROR_MESSAGE:
       return isSetErrorMessage();
+    case TASK_STATUS:
+      return isSetTaskStatus();
+    case OPERATION_STARTED:
+      return isSetOperationStarted();
+    case OPERATION_COMPLETED:
+      return isSetOperationCompleted();
     }
     throw new IllegalStateException();
   }
@@ -462,6 +601,33 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
         return false;
     }
 
+    boolean this_present_taskStatus = true && this.isSetTaskStatus();
+    boolean that_present_taskStatus = true && that.isSetTaskStatus();
+    if (this_present_taskStatus || that_present_taskStatus) {
+      if (!(this_present_taskStatus && that_present_taskStatus))
+        return false;
+      if (!this.taskStatus.equals(that.taskStatus))
+        return false;
+    }
+
+    boolean this_present_operationStarted = true && this.isSetOperationStarted();
+    boolean that_present_operationStarted = true && that.isSetOperationStarted();
+    if (this_present_operationStarted || that_present_operationStarted) {
+      if (!(this_present_operationStarted && that_present_operationStarted))
+        return false;
+      if (this.operationStarted != that.operationStarted)
+        return false;
+    }
+
+    boolean this_present_operationCompleted = true && this.isSetOperationCompleted();
+    boolean that_present_operationCompleted = true && that.isSetOperationCompleted();
+    if (this_present_operationCompleted || that_present_operationCompleted) {
+      if (!(this_present_operationCompleted && that_present_operationCompleted))
+        return false;
+      if (this.operationCompleted != that.operationCompleted)
+        return false;
+    }
+
     return true;
   }
 
@@ -494,6 +660,21 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
     if (present_errorMessage)
       list.add(errorMessage);
 
+    boolean present_taskStatus = true && (isSetTaskStatus());
+    list.add(present_taskStatus);
+    if (present_taskStatus)
+      list.add(taskStatus);
+
+    boolean present_operationStarted = true && (isSetOperationStarted());
+    list.add(present_operationStarted);
+    if (present_operationStarted)
+      list.add(operationStarted);
+
+    boolean present_operationCompleted = true && (isSetOperationCompleted());
+    list.add(present_operationCompleted);
+    if (present_operationCompleted)
+      list.add(operationCompleted);
+
     return list.hashCode();
   }
 
@@ -555,6 +736,36 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetTaskStatus()).compareTo(other.isSetTaskStatus());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTaskStatus()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskStatus, other.taskStatus);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOperationStarted()).compareTo(other.isSetOperationStarted());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOperationStarted()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationStarted, other.operationStarted);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOperationCompleted()).compareTo(other.isSetOperationCompleted());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOperationCompleted()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationCompleted, other.operationCompleted);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -618,6 +829,28 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       }
       first = false;
     }
+    if (isSetTaskStatus()) {
+      if (!first) sb.append(", ");
+      sb.append("taskStatus:");
+      if (this.taskStatus == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.taskStatus);
+      }
+      first = false;
+    }
+    if (isSetOperationStarted()) {
+      if (!first) sb.append(", ");
+      sb.append("operationStarted:");
+      sb.append(this.operationStarted);
+      first = false;
+    }
+    if (isSetOperationCompleted()) {
+      if (!first) sb.append(", ");
+      sb.append("operationCompleted:");
+      sb.append(this.operationCompleted);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -711,6 +944,30 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 6: // TASK_STATUS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.taskStatus = iprot.readString();
+              struct.setTaskStatusIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // OPERATION_STARTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.operationStarted = iprot.readI64();
+              struct.setOperationStartedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // OPERATION_COMPLETED
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.operationCompleted = iprot.readI64();
+              struct.setOperationCompletedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -755,6 +1012,23 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
           oprot.writeFieldEnd();
         }
       }
+      if (struct.taskStatus != null) {
+        if (struct.isSetTaskStatus()) {
+          oprot.writeFieldBegin(TASK_STATUS_FIELD_DESC);
+          oprot.writeString(struct.taskStatus);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetOperationStarted()) {
+        oprot.writeFieldBegin(OPERATION_STARTED_FIELD_DESC);
+        oprot.writeI64(struct.operationStarted);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetOperationCompleted()) {
+        oprot.writeFieldBegin(OPERATION_COMPLETED_FIELD_DESC);
+        oprot.writeI64(struct.operationCompleted);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -786,7 +1060,16 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       if (struct.isSetErrorMessage()) {
         optionals.set(3);
       }
-      oprot.writeBitSet(optionals, 4);
+      if (struct.isSetTaskStatus()) {
+        optionals.set(4);
+      }
+      if (struct.isSetOperationStarted()) {
+        optionals.set(5);
+      }
+      if (struct.isSetOperationCompleted()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
       if (struct.isSetOperationState()) {
         oprot.writeI32(struct.operationState.getValue());
       }
@@ -799,6 +1082,15 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       if (struct.isSetErrorMessage()) {
         oprot.writeString(struct.errorMessage);
       }
+      if (struct.isSetTaskStatus()) {
+        oprot.writeString(struct.taskStatus);
+      }
+      if (struct.isSetOperationStarted()) {
+        oprot.writeI64(struct.operationStarted);
+      }
+      if (struct.isSetOperationCompleted()) {
+        oprot.writeI64(struct.operationCompleted);
+      }
     }
 
     @Override
@@ -807,7 +1099,7 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
       struct.status = new TStatus();
       struct.status.read(iprot);
       struct.setStatusIsSet(true);
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(7);
       if (incoming.get(0)) {
         struct.operationState = org.apache.hive.service.rpc.thrift.TOperationState.findByValue(iprot.readI32());
         struct.setOperationStateIsSet(true);
@@ -824,6 +1116,18 @@ public class TGetOperationStatusResp implements org.apache.thrift.TBase<TGetOper
         struct.errorMessage = iprot.readString();
         struct.setErrorMessageIsSet(true);
       }
+      if (incoming.get(4)) {
+        struct.taskStatus = iprot.readString();
+        struct.setTaskStatusIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.operationStarted = iprot.readI64();
+        struct.setOperationStartedIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.operationCompleted = iprot.readI64();
+        struct.setOperationCompletedIsSet(true);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-php/Types.php
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-php/Types.php b/service-rpc/src/gen/thrift/gen-php/Types.php
index 76805df..b7df50a 100644
--- a/service-rpc/src/gen/thrift/gen-php/Types.php
+++ b/service-rpc/src/gen/thrift/gen-php/Types.php
@@ -7416,6 +7416,18 @@ class TGetOperationStatusResp {
    * @var string
    */
   public $errorMessage = null;
+  /**
+   * @var string
+   */
+  public $taskStatus = null;
+  /**
+   * @var int
+   */
+  public $operationStarted = null;
+  /**
+   * @var int
+   */
+  public $operationCompleted = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -7441,6 +7453,18 @@ class TGetOperationStatusResp {
           'var' => 'errorMessage',
           'type' => TType::STRING,
           ),
+        6 => array(
+          'var' => 'taskStatus',
+          'type' => TType::STRING,
+          ),
+        7 => array(
+          'var' => 'operationStarted',
+          'type' => TType::I64,
+          ),
+        8 => array(
+          'var' => 'operationCompleted',
+          'type' => TType::I64,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -7459,6 +7483,15 @@ class TGetOperationStatusResp {
       if (isset($vals['errorMessage'])) {
         $this->errorMessage = $vals['errorMessage'];
       }
+      if (isset($vals['taskStatus'])) {
+        $this->taskStatus = $vals['taskStatus'];
+      }
+      if (isset($vals['operationStarted'])) {
+        $this->operationStarted = $vals['operationStarted'];
+      }
+      if (isset($vals['operationCompleted'])) {
+        $this->operationCompleted = $vals['operationCompleted'];
+      }
     }
   }
 
@@ -7517,6 +7550,27 @@ class TGetOperationStatusResp {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 6:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->taskStatus);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 7:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->operationStarted);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 8:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->operationCompleted);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -7558,6 +7612,21 @@ class TGetOperationStatusResp {
       $xfer += $output->writeString($this->errorMessage);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->taskStatus !== null) {
+      $xfer += $output->writeFieldBegin('taskStatus', TType::STRING, 6);
+      $xfer += $output->writeString($this->taskStatus);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->operationStarted !== null) {
+      $xfer += $output->writeFieldBegin('operationStarted', TType::I64, 7);
+      $xfer += $output->writeI64($this->operationStarted);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->operationCompleted !== null) {
+      $xfer += $output->writeFieldBegin('operationCompleted', TType::I64, 8);
+      $xfer += $output->writeI64($this->operationCompleted);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
index ef5f5f5..c691781 100644
--- a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
+++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py
@@ -5635,6 +5635,9 @@ class TGetOperationStatusResp:
    - sqlState
    - errorCode
    - errorMessage
+   - taskStatus
+   - operationStarted
+   - operationCompleted
   """
 
   thrift_spec = (
@@ -5644,14 +5647,20 @@ class TGetOperationStatusResp:
     (3, TType.STRING, 'sqlState', None, None, ), # 3
     (4, TType.I32, 'errorCode', None, None, ), # 4
     (5, TType.STRING, 'errorMessage', None, None, ), # 5
+    (6, TType.STRING, 'taskStatus', None, None, ), # 6
+    (7, TType.I64, 'operationStarted', None, None, ), # 7
+    (8, TType.I64, 'operationCompleted', None, None, ), # 8
   )
 
-  def __init__(self, status=None, operationState=None, sqlState=None, errorCode=None, errorMessage=None,):
+  def __init__(self, status=None, operationState=None, sqlState=None, errorCode=None, errorMessage=None, taskStatus=None, operationStarted=None, operationCompleted=None,):
     self.status = status
     self.operationState = operationState
     self.sqlState = sqlState
     self.errorCode = errorCode
     self.errorMessage = errorMessage
+    self.taskStatus = taskStatus
+    self.operationStarted = operationStarted
+    self.operationCompleted = operationCompleted
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -5688,6 +5697,21 @@ class TGetOperationStatusResp:
           self.errorMessage = iprot.readString()
         else:
           iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.STRING:
+          self.taskStatus = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.I64:
+          self.operationStarted = iprot.readI64()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.I64:
+          self.operationCompleted = iprot.readI64()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -5718,6 +5742,18 @@ class TGetOperationStatusResp:
       oprot.writeFieldBegin('errorMessage', TType.STRING, 5)
       oprot.writeString(self.errorMessage)
       oprot.writeFieldEnd()
+    if self.taskStatus is not None:
+      oprot.writeFieldBegin('taskStatus', TType.STRING, 6)
+      oprot.writeString(self.taskStatus)
+      oprot.writeFieldEnd()
+    if self.operationStarted is not None:
+      oprot.writeFieldBegin('operationStarted', TType.I64, 7)
+      oprot.writeI64(self.operationStarted)
+      oprot.writeFieldEnd()
+    if self.operationCompleted is not None:
+      oprot.writeFieldBegin('operationCompleted', TType.I64, 8)
+      oprot.writeI64(self.operationCompleted)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -5734,6 +5770,9 @@ class TGetOperationStatusResp:
     value = (value * 31) ^ hash(self.sqlState)
     value = (value * 31) ^ hash(self.errorCode)
     value = (value * 31) ^ hash(self.errorMessage)
+    value = (value * 31) ^ hash(self.taskStatus)
+    value = (value * 31) ^ hash(self.operationStarted)
+    value = (value * 31) ^ hash(self.operationCompleted)
     return value
 
   def __repr__(self):

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
----------------------------------------------------------------------
diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
index f004ec4..07ed97c 100644
--- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
+++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb
@@ -1471,13 +1471,19 @@ class TGetOperationStatusResp
   SQLSTATE = 3
   ERRORCODE = 4
   ERRORMESSAGE = 5
+  TASKSTATUS = 6
+  OPERATIONSTARTED = 7
+  OPERATIONCOMPLETED = 8
 
   FIELDS = {
     STATUS => {:type => ::Thrift::Types::STRUCT, :name => 'status', :class => ::TStatus},
     OPERATIONSTATE => {:type => ::Thrift::Types::I32, :name => 'operationState', :optional => true, :enum_class => ::TOperationState},
     SQLSTATE => {:type => ::Thrift::Types::STRING, :name => 'sqlState', :optional => true},
     ERRORCODE => {:type => ::Thrift::Types::I32, :name => 'errorCode', :optional => true},
-    ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true}
+    ERRORMESSAGE => {:type => ::Thrift::Types::STRING, :name => 'errorMessage', :optional => true},
+    TASKSTATUS => {:type => ::Thrift::Types::STRING, :name => 'taskStatus', :optional => true},
+    OPERATIONSTARTED => {:type => ::Thrift::Types::I64, :name => 'operationStarted', :optional => true},
+    OPERATIONCOMPLETED => {:type => ::Thrift::Types::I64, :name => 'operationCompleted', :optional => true}
   }
 
   def struct_fields; FIELDS; end

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
----------------------------------------------------------------------
diff --git a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
index c513689..8d51a73 100644
--- a/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
+++ b/service/src/jamon/org/apache/hive/tmpl/QueryProfileTmpl.jamon
@@ -176,16 +176,16 @@ org.apache.hive.service.cli.operation.SQLOperationDisplay;
            <th>Retry If Fail</th>
         </tr>
 
-       <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getTaskInfos() != null %>
-           <%for QueryDisplay.TaskInfo taskInfo : sod.getQueryDisplay().getTaskInfos() %>
+       <%if sod.getQueryDisplay() != null && sod.getQueryDisplay().getTaskDisplays() != null %>
+           <%for QueryDisplay.TaskDisplay taskDisplay : sod.getQueryDisplay().getTaskDisplays() %>
                <tr>
-                   <td><% taskInfo.getTaskId() + ":" + taskInfo.getTaskType() %></td>
-                   <td><% taskInfo.getStatus() %></td>
-                   <td><% new Date(taskInfo.getBeginTime()) %>
-                   <td><% taskInfo.getEndTime() == 0 ? "" : new Date(taskInfo.getEndTime()) %></td>
-                   <td><% taskInfo.getElapsedTime()/1000 %> (s) </td>
-                   <td><% taskInfo.isRequireLock() %></td>
-                   <td><% taskInfo.isRetryIfFail() %></td>
+                   <td><% taskDisplay.getTaskId() + ":" + taskDisplay.getTaskType() %></td>
+                   <td><% taskDisplay.getStatus() %></td>
+                   <td><% taskDisplay.getBeginTime() == null ? "" : new Date(taskDisplay.getBeginTime()) %></td>
+                   <td><% taskDisplay.getEndTime() == null ? "" : new Date(taskDisplay.getEndTime()) %></td>
+                   <td><% taskDisplay.getElapsedTime() == null ? "" : taskDisplay.getElapsedTime()/1000 %> (s) </td>
+                   <td><% taskDisplay.isRequireLock() %></td>
+                   <td><% taskDisplay.isRetryIfFail() %></td>
                </tr>
            </%for>
        </%if>

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/java/org/apache/hive/service/cli/OperationStatus.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/OperationStatus.java b/service/src/java/org/apache/hive/service/cli/OperationStatus.java
index e45b828..5e24d38 100644
--- a/service/src/java/org/apache/hive/service/cli/OperationStatus.java
+++ b/service/src/java/org/apache/hive/service/cli/OperationStatus.java
@@ -25,10 +25,16 @@ package org.apache.hive.service.cli;
 public class OperationStatus {
 
   private final OperationState state;
+  private final String taskStatus;
+  private final long operationStarted;
+  private final long operationCompleted;
   private final HiveSQLException operationException;
 
-  public OperationStatus(OperationState state, HiveSQLException operationException) {
+  public OperationStatus(OperationState state, String taskStatus, long operationStarted, long operationCompleted, HiveSQLException operationException) {
     this.state = state;
+    this.taskStatus = taskStatus;
+    this.operationStarted = operationStarted;
+    this.operationCompleted = operationCompleted;
     this.operationException = operationException;
   }
 
@@ -36,6 +42,18 @@ public class OperationStatus {
     return state;
   }
 
+  public String getTaskStatus() {
+    return taskStatus;
+  }
+
+  public long getOperationStarted() {
+    return operationStarted;
+  }
+
+  public long getOperationCompleted() {
+    return operationCompleted;
+  }
+
   public HiveSQLException getOperationException() {
     return operationException;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/java/org/apache/hive/service/cli/operation/Operation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/Operation.java b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
index 22f725c..d9a273b 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/Operation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/Operation.java
@@ -77,6 +77,9 @@ public abstract class Operation {
   private volatile long lastAccessTime;
   private final long beginTime;
 
+  protected long operationStart;
+  protected long operationComplete;
+
   protected static final EnumSet<FetchOrientation> DEFAULT_FETCH_ORIENTATION_SET =
       EnumSet.of(FetchOrientation.FETCH_NEXT,FetchOrientation.FETCH_FIRST);
 
@@ -137,7 +140,13 @@ public abstract class Operation {
   }
 
   public OperationStatus getStatus() {
-    return new OperationStatus(state, operationException);
+    String taskStatus = null;
+    try {
+      taskStatus = getTaskStatus();
+    } catch (HiveSQLException sqlException) {
+      LOG.error("Error getting task status for " + opHandle.toString(), sqlException);
+    }
+    return new OperationStatus(state, taskStatus, operationStart, operationComplete, operationException);
   }
 
   public boolean hasResultSet() {
@@ -346,6 +355,10 @@ public abstract class Operation {
     return getNextRowSet(FetchOrientation.FETCH_NEXT, DEFAULT_FETCH_MAX_ROWS);
   }
 
+  public String getTaskStatus() throws HiveSQLException {
+    return null;
+  }
+
   /**
    * Verify if the given fetch orientation is part of the default orientation types.
    * @param orientation
@@ -431,5 +444,31 @@ public abstract class Operation {
   }
 
   protected void onNewState(OperationState state, OperationState prevState) {
+    switch(state) {
+      case RUNNING:
+        markOperationStartTime();
+        break;
+      case ERROR:
+      case FINISHED:
+      case CANCELED:
+        markOperationCompletedTime();
+        break;
+    }
+  }
+
+  public long getOperationComplete() {
+    return operationComplete;
+  }
+
+  public long getOperationStart() {
+    return operationStart;
+  }
+
+  protected void markOperationStartTime() {
+    operationStart = System.currentTimeMillis();
+  }
+
+  protected void markOperationCompletedTime() {
+    operationComplete = System.currentTimeMillis();
   }
 }
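
The onNewState() hook above stamps operationStart when the operation enters
RUNNING and operationComplete on any terminal state (ERROR, FINISHED,
CANCELED). The same logic in isolation (a hypothetical class, not the real
Operation):

public class TimingHookSketch {
  enum OperationState { INITIALIZED, RUNNING, FINISHED, CANCELED, ERROR }

  private long operationStart;
  private long operationComplete;

  void onNewState(OperationState state) {
    switch (state) {
      case RUNNING:
        operationStart = System.currentTimeMillis();
        break;
      case ERROR:
      case FINISHED:
      case CANCELED:
        operationComplete = System.currentTimeMillis();
        break;
      default:
        break;  // other states do not affect the timestamps
    }
  }

  public static void main(String[] args) throws InterruptedException {
    TimingHookSketch op = new TimingHookSketch();
    op.onNewState(OperationState.RUNNING);
    Thread.sleep(50);
    op.onNewState(OperationState.FINISHED);
    System.out.println("ran for " + (op.operationComplete - op.operationStart) + " ms");
  }
}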

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
index 100dc6a..04d816a 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java
@@ -18,17 +18,10 @@
 
 package org.apache.hive.service.cli.operation;
 
-import java.io.IOException;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.io.UnsupportedEncodingException;
+import java.io.*;
 import java.security.PrivilegedExceptionAction;
 import java.sql.SQLException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -44,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Schema;
 import org.apache.hadoop.hive.ql.CommandNeedRetryException;
 import org.apache.hadoop.hive.ql.Driver;
+import org.apache.hadoop.hive.ql.QueryDisplay;
 import org.apache.hadoop.hive.ql.exec.ExplainTask;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.metadata.Hive;
@@ -69,6 +63,9 @@ import org.apache.hive.service.cli.RowSetFactory;
 import org.apache.hive.service.cli.TableSchema;
 import org.apache.hive.service.cli.session.HiveSession;
 import org.apache.hive.service.server.ThreadWithGarbageCleanup;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
 
 /**
  * SQLOperation.
@@ -128,7 +125,6 @@ public class SQLOperation extends ExecuteStatementOperation {
    */
   public void prepare(HiveConf sqlOperationConf) throws HiveSQLException {
     setState(OperationState.RUNNING);
-
     try {
       driver = new Driver(sqlOperationConf, getParentSession().getUserName());
       sqlOpDisplay.setQueryDisplay(driver.getQueryDisplay());
@@ -387,6 +383,38 @@ public class SQLOperation extends ExecuteStatementOperation {
     }
   }
 
+  @Override
+  public String getTaskStatus() throws HiveSQLException {
+    if (driver != null) {
+      List<QueryDisplay.TaskDisplay> statuses = driver.getQueryDisplay().getTaskDisplays();
+      if (statuses != null) {
+        ByteArrayOutputStream out = null;
+        try {
+          ObjectMapper mapper = new ObjectMapper();
+          out = new ByteArrayOutputStream();
+          mapper.writeValue(out, statuses);
+          return out.toString("UTF-8");
+        } catch (JsonGenerationException e) {
+          throw new HiveSQLException(e);
+        } catch (JsonMappingException e) {
+          throw new HiveSQLException(e);
+        } catch (IOException e) {
+          throw new HiveSQLException(e);
+        } finally {
+          if (out != null) {
+            try {
+              out.close();
+            } catch (IOException e) {
+              throw new HiveSQLException(e);
+            }
+          }
+        }
+      }
+    }
+    // Driver not initialized
+    return null;
+  }
+
   private RowSet decode(List<Object> rows, RowSet rowSet) throws Exception {
     if (driver.isFetchingTable()) {
       return prepareFromRow(rows, rowSet);
@@ -508,6 +536,7 @@ public class SQLOperation extends ExecuteStatementOperation {
 
   @Override
   protected void onNewState(OperationState state, OperationState prevState) {
+    super.onNewState(state, prevState);
     currentSQLStateScope = setMetrics(currentSQLStateScope, MetricsConstant.SQL_OPERATION_PREFIX,
       MetricsConstant.COMPLETED_SQL_OPERATION_PREFIX, state);
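
Since getTaskStatus() above serializes the TaskDisplay list with Jackson, the
taskStatus string carried in the Thrift response is a JSON array. A sketch of
the inverse on the receiving side (a hypothetical helper; generic maps are
used because TaskDisplay itself is a server-side class):

import java.util.List;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.type.TypeReference;

public class TaskStatusParser {
  // Each map holds one task's fields: taskId, taskType, name, returnVal, ...
  public static List<Map<String, Object>> parse(String taskStatusJson)
      throws Exception {
    return new ObjectMapper().readValue(taskStatusJson,
        new TypeReference<List<Map<String, Object>>>() {});
  }
}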
 

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 8dff264..62fcde5 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -679,6 +679,9 @@ public abstract class ThriftCLIService extends AbstractService implements TCLISe
           new OperationHandle(req.getOperationHandle()));
       resp.setOperationState(operationStatus.getState().toTOperationState());
       HiveSQLException opException = operationStatus.getOperationException();
+      resp.setTaskStatus(operationStatus.getTaskStatus());
+      resp.setOperationStarted(operationStatus.getOperationStarted());
+      resp.setOperationCompleted(operationStatus.getOperationCompleted());
       if (opException != null) {
         resp.setSqlState(opException.getSQLState());
         resp.setErrorCode(opException.getErrorCode());
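
(The three setters above presuppose matching fields -- taskStatus, operationStarted, operationCompleted -- on the Thrift TGetOperationStatusResp struct; the TCLIService IDL change and the regenerated Thrift sources are elided from this abbreviated [abbrv] posting.)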

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
index 5f01165..ccce6dc 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java
@@ -361,7 +361,8 @@ public class ThriftCLIServiceClient extends CLIServiceClient {
       if (opState == OperationState.ERROR) {
         opException = new HiveSQLException(resp.getErrorMessage(), resp.getSqlState(), resp.getErrorCode());
       }
-      return new OperationStatus(opState, opException);
+      return new OperationStatus(opState, resp.getTaskStatus(), resp.getOperationStarted(),
+        resp.getOperationCompleted(), opException);
     } catch (HiveSQLException e) {
       throw e;
     } catch (Exception e) {
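
Taken together with the server-side changes above, a caller can now poll for progress without scraping logs. A usage sketch built only on calls shown in this patch -- the client, session, and confOverlay names are illustrative, not from the commit:

    // Poll an async statement until it reaches a terminal state, then report
    // duration from the new start/completion timestamps.
    OperationHandle handle = client.executeStatementAsync(session, "SELECT 1", confOverlay);
    OperationStatus status = client.getOperationStatus(handle);
    while (status.getState() != OperationState.FINISHED
        && status.getState() != OperationState.ERROR
        && status.getState() != OperationState.CANCELED
        && status.getState() != OperationState.CLOSED) {
      Thread.sleep(500);  // back off between polls
      status = client.getOperationStatus(handle);
    }
    long elapsed = status.getOperationCompleted() - status.getOperationStarted();
    System.out.println("took " + elapsed + "; tasks: " + status.getTaskStatus());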

http://git-wip-us.apache.org/repos/asf/hive/blob/de260b45/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
----------------------------------------------------------------------
diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
index e78181a..e145eb4 100644
--- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
+++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java
@@ -18,11 +18,9 @@
 
 package org.apache.hive.service.cli;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
+import java.io.ByteArrayInputStream;
 import java.io.Serializable;
 import java.util.Collections;
 import java.util.HashMap;
@@ -36,15 +34,19 @@ import java.util.concurrent.FutureTask;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hive.ql.ErrorMsg;
+import org.apache.hadoop.hive.ql.QueryDisplay;
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHook;
 import org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.type.TypeReference;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.session.SessionState;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -169,6 +171,9 @@ public abstract class CLIServiceTest {
     // Blocking execute
     queryString = "SELECT ID+1 FROM TEST_EXEC";
     opHandle = client.executeStatement(sessionHandle, queryString, confOverlay);
+
+    OperationStatus opStatus = client.getOperationStatus(opHandle);
+    checkOperationTimes(opHandle, opStatus);
     // Expect query to be completed now
     assertEquals("Query should be finished",
         OperationState.FINISHED, client.getOperationStatus(opHandle).getState());
@@ -266,6 +271,10 @@ public abstract class CLIServiceTest {
     opHandle = client.executeStatementAsync(sessionHandle, queryString, confOverlay);
     System.out.println("Cancelling " + opHandle);
     client.cancelOperation(opHandle);
+
+    OperationStatus operationStatus = client.getOperationStatus(opHandle);
+    checkOperationTimes(opHandle, operationStatus);
+
     state = client.getOperationStatus(opHandle).getState();
     System.out.println(opHandle + " after cancelling, state= " + state);
     assertEquals("Query should be cancelled", OperationState.CANCELED, state);
@@ -489,7 +498,7 @@ public abstract class CLIServiceTest {
     SessionState.get().setIsHiveServerQuery(true); // Pretend we are in HS2.
 
     String queryString = "SET " + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname
-        + " = false";
+      + " = false";
     client.executeStatement(sessionHandle, queryString, confOverlay);
     return sessionHandle;
   }
@@ -620,4 +629,89 @@ public abstract class CLIServiceTest {
     client.closeOperation(opHandle);
     client.closeSession(sessionHandle);
   }
+
+  @Test
+  public void testTaskStatus() throws Exception {
+    HashMap<String, String> confOverlay = new HashMap<String, String>();
+    String tableName = "TEST_EXEC_ASYNC";
+    String columnDefinitions = "(ID STRING)";
+
+    // Open a session and set up the test data
+    SessionHandle sessionHandle = setupTestData(tableName, columnDefinitions, confOverlay);
+    assertNotNull(sessionHandle);
+    // nonblocking execute
+    String select = "SELECT ID + ' ' FROM TEST_EXEC_ASYNC";
+    OperationHandle ophandle =
+      client.executeStatementAsync(sessionHandle, select, confOverlay);
+
+    OperationStatus status = null;
+    int count = 0;
+    while (true) {
+      status = client.getOperationStatus(ophandle);
+      checkOperationTimes(ophandle, status);
+      OperationState state = status.getState();
+      System.out.println("Polling: " + ophandle + " count=" + (++count)
+        + " state=" + state);
+
+      String jsonTaskStatus = status.getTaskStatus();
+      assertNotNull(jsonTaskStatus);
+      ObjectMapper mapper = new ObjectMapper();
+      ByteArrayInputStream in = new ByteArrayInputStream(jsonTaskStatus.getBytes("UTF-8"));
+      List<QueryDisplay.TaskDisplay> taskStatuses =
+        mapper.readValue(in, new TypeReference<List<QueryDisplay.TaskDisplay>>(){});
+      checkTaskStatuses(taskStatuses);
+      System.out.println("task statuses: " + jsonTaskStatus); // TaskDisplay doesn't have a toString, using json
+      if (OperationState.CANCELED == state || state == OperationState.CLOSED
+        || state == OperationState.FINISHED
+        || state == OperationState.ERROR) {
+        break;
+      }
+      Thread.sleep(1000);
+    }
+  }
+
+  private void checkTaskStatuses(List<QueryDisplay.TaskDisplay> taskDisplays) {
+    assertNotNull(taskDisplays);
+    for (QueryDisplay.TaskDisplay taskDisplay: taskDisplays) {
+      switch (taskDisplay.taskState) {
+        case INITIALIZED:
+        case QUEUED:
+          assertNull(taskDisplay.getBeginTime());
+          assertNull(taskDisplay.getEndTime());
+          assertNull(taskDisplay.getElapsedTime());
+          assertNull(taskDisplay.getErrorMsg());
+          assertNull(taskDisplay.getReturnValue());
+          break;
+        case RUNNING:
+          assertNotNull(taskDisplay.getBeginTime());
+          assertNull(taskDisplay.getEndTime());
+          assertNotNull(taskDisplay.getElapsedTime());
+          assertNull(taskDisplay.getErrorMsg());
+          assertNull(taskDisplay.getReturnValue());
+          break;
+        case FINISHED:
+          assertNotNull(taskDisplay.getBeginTime());
+          assertNotNull(taskDisplay.getEndTime());
+          assertNotNull(taskDisplay.getElapsedTime());
+          break;
+        case UNKNOWN:
+        default:
+          fail("unknown task status: " + taskDisplay);
+      }
+    }
+  }
+
+
+  private void checkOperationTimes(OperationHandle operationHandle, OperationStatus status) {
+    OperationState state = status.getState();
+    assertFalse(status.getOperationStarted() == 0);
+    if (OperationState.CANCELED == state || state == OperationState.CLOSED
+      || state == OperationState.FINISHED || state == OperationState.ERROR) {
+      System.out.println("##OP " + operationHandle.getHandleIdentifier() + " STATE:" + status.getState()
+        + " START:" + status.getOperationStarted()
+        + " END:" + status.getOperationCompleted());
+      assertFalse(status.getOperationCompleted() == 0);
+      assertTrue(status.getOperationCompleted() - status.getOperationStarted() >= 0);
+    }
+  }
 }


[24/51] [abbrv] hive git commit: HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)

Posted by jd...@apache.org.
HIVE-13175: Disallow making external tables transactional (Wei Zheng, reviewed by Eugene Koifman)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ff55d0a6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ff55d0a6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ff55d0a6

Branch: refs/heads/llap
Commit: ff55d0a67e59c15b5ccfbdf1317bfd60cf057a30
Parents: 1e8a31e
Author: Wei <wz...@hortonworks.com>
Authored: Thu Mar 10 13:39:13 2016 -0800
Committer: Wei <wz...@hortonworks.com>
Committed: Thu Mar 10 13:39:13 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hive/metastore/TestHiveMetaStore.java       |  1 +
 .../metastore/TransactionalValidationListener.java     | 11 +++++++++++
 .../test/queries/clientnegative/alter_external_acid.q  |  9 +++++++++
 .../test/queries/clientnegative/create_external_acid.q |  6 ++++++
 .../results/clientnegative/alter_external_acid.q.out   | 13 +++++++++++++
 .../results/clientnegative/create_external_acid.q.out  |  5 +++++
 6 files changed, 45 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index a55c186..5da4165 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@ -2944,6 +2944,7 @@ public abstract class TestHiveMetaStore extends TestCase {
 
     tbl.setSd(sd);
     tbl.setLastAccessTime(lastAccessTime);
+    tbl.setTableType(TableType.MANAGED_TABLE.toString());
 
     client.createTable(tbl);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index 96158f8..3e74675 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -86,6 +86,12 @@ final class TransactionalValidationListener extends MetaStorePreEventListener {
         throw new MetaException("The table must be bucketed and stored using an ACID compliant" +
             " format (such as ORC)");
       }
+
+      if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
+        throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() +
+            " cannot be declared transactional because it's an external table");
+      }
+
       return;
     }
     Table oldTable = context.getOldTable();
@@ -144,6 +150,11 @@ final class TransactionalValidationListener extends MetaStorePreEventListener {
             " format (such as ORC)");
       }
 
+      if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
+        throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() +
+            " cannot be declared transactional because it's an external table");
+      }
+
       // normalize prop name
       parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, Boolean.TRUE.toString());
       return;
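
One sharp edge worth flagging: getTableType() can be null for tables created without an explicit type (which is what the TestHiveMetaStore hunk above now guards against by setting MANAGED_TABLE), so newTable.getTableType().equals(...) would surface as a NullPointerException rather than the intended MetaException. A null-safe variant of the same check -- a sketch, not what was committed:

    // Constant-first comparison tolerates a null table type.
    if (TableType.EXTERNAL_TABLE.toString().equals(newTable.getTableType())) {
      throw new MetaException(newTable.getDbName() + "." + newTable.getTableName() +
          " cannot be declared transactional because it's an external table");
    }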

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/alter_external_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/alter_external_acid.q b/ql/src/test/queries/clientnegative/alter_external_acid.q
new file mode 100644
index 0000000..7807278
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/alter_external_acid.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc;
+
+alter table acid_external set TBLPROPERTIES ('transactional'='true');
+
+drop table acid_external;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/queries/clientnegative/create_external_acid.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientnegative/create_external_acid.q b/ql/src/test/queries/clientnegative/create_external_acid.q
new file mode 100644
index 0000000..d6b2d84
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/create_external_acid.q
@@ -0,0 +1,6 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+
+
+create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/results/clientnegative/alter_external_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/alter_external_acid.q.out b/ql/src/test/results/clientnegative/alter_external_acid.q.out
new file mode 100644
index 0000000..69bba3b
--- /dev/null
+++ b/ql/src/test/results/clientnegative/alter_external_acid.q.out
@@ -0,0 +1,13 @@
+PREHOOK: query: create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_external
+POSTHOOK: query: create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_external
+PREHOOK: query: alter table acid_external set TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: ALTERTABLE_PROPERTIES
+PREHOOK: Input: default@acid_external
+PREHOOK: Output: default@acid_external
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. default.acid_external cannot be declared transactional because it's an external table

http://git-wip-us.apache.org/repos/asf/hive/blob/ff55d0a6/ql/src/test/results/clientnegative/create_external_acid.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientnegative/create_external_acid.q.out b/ql/src/test/results/clientnegative/create_external_acid.q.out
new file mode 100644
index 0000000..123fe5a
--- /dev/null
+++ b/ql/src/test/results/clientnegative/create_external_acid.q.out
@@ -0,0 +1,5 @@
+PREHOOK: query: create external table acid_external (a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_external
+FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:default.acid_external cannot be declared transactional because it's an external table)