Posted to commits@hive.apache.org by se...@apache.org on 2018/07/18 18:51:57 UTC

[01/48] hive git commit: HIVE-20135: Fix incompatible change in TimestampColumnVector to default (Jesus Camacho Rodriguez, reviewed by Owen O'Malley)

Repository: hive
Updated Branches:
  refs/heads/master-txnstats 1c9947f38 -> 37a1907be


HIVE-20135: Fix incompatible change in TimestampColumnVector to default (Jesus Camacho Rodriguez, reviewed by Owen O'Malley)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/537c9cb9
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/537c9cb9
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/537c9cb9

Branch: refs/heads/master-txnstats
Commit: 537c9cb9bf750289f1039fdaaf52f0a7310a88f2
Parents: 9c5c940
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Fri Jul 13 20:42:34 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Jul 13 20:42:34 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java   | 2 +-
 .../apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java  | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/537c9cb9/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
index f11a319..3b5f3ba 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/TimestampColumnVector.java
@@ -81,7 +81,7 @@ public class TimestampColumnVector extends ColumnVector {
 
     scratchWritable = null;     // Allocated by caller.
 
-    isUTC = true;
+    isUTC = false;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hive/blob/537c9cb9/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java b/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
index fbd0c06..d1a546f 100644
--- a/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
+++ b/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/TestStructColumnVector.java
@@ -105,6 +105,7 @@ public class TestStructColumnVector {
     VectorizedRowBatch batch = new VectorizedRowBatch(2);
     LongColumnVector x1 = new LongColumnVector();
     TimestampColumnVector x2 = new TimestampColumnVector();
+    x2.setIsUTC(true);
     StructColumnVector x = new StructColumnVector(1024, x1, x2);
     BytesColumnVector y = new BytesColumnVector();
     batch.cols[0] = x;
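
For context: the hunk above flips the constructor default of isUTC from true to false, so code that previously relied on the UTC default must now opt in explicitly, exactly as the updated test does. Below is a minimal, illustrative sketch of that opt-in; only the TimestampColumnVector constructor, setIsUTC(boolean) and set(int, Timestamp) come from the patched storage-api, the surrounding class is hypothetical.

    import java.sql.Timestamp;
    import org.apache.hadoop.hive.ql.exec.vector.TimestampColumnVector;

    public class UtcTimestampVectorExample {
      public static void main(String[] args) {
        TimestampColumnVector tcv = new TimestampColumnVector();
        // After HIVE-20135 isUTC defaults to false again, so producers and
        // consumers that expect UTC-based semantics must request it explicitly.
        tcv.setIsUTC(true);
        tcv.set(0, Timestamp.valueOf("2018-07-13 20:42:34"));
      }
    }
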


[24/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
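
The Thrift changes in this commit drop the per-client invalidationTime bookkeeping from Materialization and instead stamp an optional materializationTime onto CreationMetadata, so the invalidation cache can be rebuilt consistently by any remote metastore. The following is a hedged sketch of carrying the new field; the getters/setters (setMaterializationTime, isSetMaterializationTime, getMaterializationTime) come from the generated bean shown in the diff below, the rest of the class, the field values, and the millisecond unit are illustrative assumptions.

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;

    public class MaterializationTimeExample {
      public static void main(String[] args) {
        CreationMetadata cm = new CreationMetadata();
        cm.setCatName("hive");
        cm.setDbName("default");
        cm.setTblName("mv1");
        cm.setTablesUsed(Collections.singleton("default.src"));
        // New optional field 6 (materializationTime): the time the
        // materialization was created/refreshed (milliseconds assumed
        // here purely for illustration).
        cm.setMaterializationTime(System.currentTimeMillis());
        if (cm.isSetMaterializationTime()) {
          System.out.println("materialized at " + cm.getMaterializationTime());
        }
      }
    }
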
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
index f5913fc..d0c299b 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -8742,8 +8742,9 @@ inline std::ostream& operator<<(std::ostream& out, const BasicTxnInfo& obj)
 }
 
 typedef struct _CreationMetadata__isset {
-  _CreationMetadata__isset() : validTxnList(false) {}
+  _CreationMetadata__isset() : validTxnList(false), materializationTime(false) {}
   bool validTxnList :1;
+  bool materializationTime :1;
 } _CreationMetadata__isset;
 
 class CreationMetadata {
@@ -8751,7 +8752,7 @@ class CreationMetadata {
 
   CreationMetadata(const CreationMetadata&);
   CreationMetadata& operator=(const CreationMetadata&);
-  CreationMetadata() : catName(), dbName(), tblName(), validTxnList() {
+  CreationMetadata() : catName(), dbName(), tblName(), validTxnList(), materializationTime(0) {
   }
 
   virtual ~CreationMetadata() throw();
@@ -8760,6 +8761,7 @@ class CreationMetadata {
   std::string tblName;
   std::set<std::string>  tablesUsed;
   std::string validTxnList;
+  int64_t materializationTime;
 
   _CreationMetadata__isset __isset;
 
@@ -8773,6 +8775,8 @@ class CreationMetadata {
 
   void __set_validTxnList(const std::string& val);
 
+  void __set_materializationTime(const int64_t val);
+
   bool operator == (const CreationMetadata & rhs) const
   {
     if (!(catName == rhs.catName))
@@ -8787,6 +8791,10 @@ class CreationMetadata {
       return false;
     else if (__isset.validTxnList && !(validTxnList == rhs.validTxnList))
       return false;
+    if (__isset.materializationTime != rhs.__isset.materializationTime)
+      return false;
+    else if (__isset.materializationTime && !(materializationTime == rhs.materializationTime))
+      return false;
     return true;
   }
   bool operator != (const CreationMetadata &rhs) const {
@@ -10452,52 +10460,23 @@ inline std::ostream& operator<<(std::ostream& out, const TableMeta& obj)
   return out;
 }
 
-typedef struct _Materialization__isset {
-  _Materialization__isset() : validTxnList(false), invalidationTime(false), sourceTablesUpdateDeleteModified(false) {}
-  bool validTxnList :1;
-  bool invalidationTime :1;
-  bool sourceTablesUpdateDeleteModified :1;
-} _Materialization__isset;
 
 class Materialization {
  public:
 
   Materialization(const Materialization&);
   Materialization& operator=(const Materialization&);
-  Materialization() : validTxnList(), invalidationTime(0), sourceTablesUpdateDeleteModified(0) {
+  Materialization() : sourceTablesUpdateDeleteModified(0) {
   }
 
   virtual ~Materialization() throw();
-  std::set<std::string>  tablesUsed;
-  std::string validTxnList;
-  int64_t invalidationTime;
   bool sourceTablesUpdateDeleteModified;
 
-  _Materialization__isset __isset;
-
-  void __set_tablesUsed(const std::set<std::string> & val);
-
-  void __set_validTxnList(const std::string& val);
-
-  void __set_invalidationTime(const int64_t val);
-
   void __set_sourceTablesUpdateDeleteModified(const bool val);
 
   bool operator == (const Materialization & rhs) const
   {
-    if (!(tablesUsed == rhs.tablesUsed))
-      return false;
-    if (__isset.validTxnList != rhs.__isset.validTxnList)
-      return false;
-    else if (__isset.validTxnList && !(validTxnList == rhs.validTxnList))
-      return false;
-    if (__isset.invalidationTime != rhs.__isset.invalidationTime)
-      return false;
-    else if (__isset.invalidationTime && !(invalidationTime == rhs.invalidationTime))
-      return false;
-    if (__isset.sourceTablesUpdateDeleteModified != rhs.__isset.sourceTablesUpdateDeleteModified)
-      return false;
-    else if (__isset.sourceTablesUpdateDeleteModified && !(sourceTablesUpdateDeleteModified == rhs.sourceTablesUpdateDeleteModified))
+    if (!(sourceTablesUpdateDeleteModified == rhs.sourceTablesUpdateDeleteModified))
       return false;
     return true;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
index 611bf6f..281dada 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
@@ -43,6 +43,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)4);
   private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField MATERIALIZATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("materializationTime", org.apache.thrift.protocol.TType.I64, (short)6);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,6 +56,7 @@ import org.slf4j.LoggerFactory;
   private String tblName; // required
   private Set<String> tablesUsed; // required
   private String validTxnList; // optional
+  private long materializationTime; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -62,7 +64,8 @@ import org.slf4j.LoggerFactory;
     DB_NAME((short)2, "dbName"),
     TBL_NAME((short)3, "tblName"),
     TABLES_USED((short)4, "tablesUsed"),
-    VALID_TXN_LIST((short)5, "validTxnList");
+    VALID_TXN_LIST((short)5, "validTxnList"),
+    MATERIALIZATION_TIME((short)6, "materializationTime");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -87,6 +90,8 @@ import org.slf4j.LoggerFactory;
           return TABLES_USED;
         case 5: // VALID_TXN_LIST
           return VALID_TXN_LIST;
+        case 6: // MATERIALIZATION_TIME
+          return MATERIALIZATION_TIME;
         default:
           return null;
       }
@@ -127,7 +132,9 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST};
+  private static final int __MATERIALIZATIONTIME_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST,_Fields.MATERIALIZATION_TIME};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -142,6 +149,8 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
     tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.MATERIALIZATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("materializationTime", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreationMetadata.class, metaDataMap);
   }
@@ -166,6 +175,7 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public CreationMetadata(CreationMetadata other) {
+    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
@@ -182,6 +192,7 @@ import org.slf4j.LoggerFactory;
     if (other.isSetValidTxnList()) {
       this.validTxnList = other.validTxnList;
     }
+    this.materializationTime = other.materializationTime;
   }
 
   public CreationMetadata deepCopy() {
@@ -195,6 +206,8 @@ import org.slf4j.LoggerFactory;
     this.tblName = null;
     this.tablesUsed = null;
     this.validTxnList = null;
+    setMaterializationTimeIsSet(false);
+    this.materializationTime = 0;
   }
 
   public String getCatName() {
@@ -327,6 +340,28 @@ import org.slf4j.LoggerFactory;
     }
   }
 
+  public long getMaterializationTime() {
+    return this.materializationTime;
+  }
+
+  public void setMaterializationTime(long materializationTime) {
+    this.materializationTime = materializationTime;
+    setMaterializationTimeIsSet(true);
+  }
+
+  public void unsetMaterializationTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MATERIALIZATIONTIME_ISSET_ID);
+  }
+
+  /** Returns true if field materializationTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaterializationTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __MATERIALIZATIONTIME_ISSET_ID);
+  }
+
+  public void setMaterializationTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MATERIALIZATIONTIME_ISSET_ID, value);
+  }
+
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
     case CAT_NAME:
@@ -369,6 +404,14 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
+    case MATERIALIZATION_TIME:
+      if (value == null) {
+        unsetMaterializationTime();
+      } else {
+        setMaterializationTime((Long)value);
+      }
+      break;
+
     }
   }
 
@@ -389,6 +432,9 @@ import org.slf4j.LoggerFactory;
     case VALID_TXN_LIST:
       return getValidTxnList();
 
+    case MATERIALIZATION_TIME:
+      return getMaterializationTime();
+
     }
     throw new IllegalStateException();
   }
@@ -410,6 +456,8 @@ import org.slf4j.LoggerFactory;
       return isSetTablesUsed();
     case VALID_TXN_LIST:
       return isSetValidTxnList();
+    case MATERIALIZATION_TIME:
+      return isSetMaterializationTime();
     }
     throw new IllegalStateException();
   }
@@ -472,6 +520,15 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
+    boolean this_present_materializationTime = true && this.isSetMaterializationTime();
+    boolean that_present_materializationTime = true && that.isSetMaterializationTime();
+    if (this_present_materializationTime || that_present_materializationTime) {
+      if (!(this_present_materializationTime && that_present_materializationTime))
+        return false;
+      if (this.materializationTime != that.materializationTime)
+        return false;
+    }
+
     return true;
   }
 
@@ -504,6 +561,11 @@ import org.slf4j.LoggerFactory;
     if (present_validTxnList)
       list.add(validTxnList);
 
+    boolean present_materializationTime = true && (isSetMaterializationTime());
+    list.add(present_materializationTime);
+    if (present_materializationTime)
+      list.add(materializationTime);
+
     return list.hashCode();
   }
 
@@ -565,6 +627,16 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
+    lastComparison = Boolean.valueOf(isSetMaterializationTime()).compareTo(other.isSetMaterializationTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaterializationTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.materializationTime, other.materializationTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
     return 0;
   }
 
@@ -626,6 +698,12 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
+    if (isSetMaterializationTime()) {
+      if (!first) sb.append(", ");
+      sb.append("materializationTime:");
+      sb.append(this.materializationTime);
+      first = false;
+    }
     sb.append(")");
     return sb.toString();
   }
@@ -661,6 +739,8 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -735,6 +815,14 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
+          case 6: // MATERIALIZATION_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.materializationTime = iprot.readI64();
+              struct.setMaterializationTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
           default:
             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
         }
@@ -782,6 +870,11 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
+      if (struct.isSetMaterializationTime()) {
+        oprot.writeFieldBegin(MATERIALIZATION_TIME_FIELD_DESC);
+        oprot.writeI64(struct.materializationTime);
+        oprot.writeFieldEnd();
+      }
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -813,10 +906,16 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetValidTxnList()) {
         optionals.set(0);
       }
-      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMaterializationTime()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
       if (struct.isSetValidTxnList()) {
         oprot.writeString(struct.validTxnList);
       }
+      if (struct.isSetMaterializationTime()) {
+        oprot.writeI64(struct.materializationTime);
+      }
     }
 
     @Override
@@ -839,11 +938,15 @@ import org.slf4j.LoggerFactory;
         }
       }
       struct.setTablesUsedIsSet(true);
-      BitSet incoming = iprot.readBitSet(1);
+      BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         struct.validTxnList = iprot.readString();
         struct.setValidTxnListIsSet(true);
       }
+      if (incoming.get(1)) {
+        struct.materializationTime = iprot.readI64();
+        struct.setMaterializationTimeIsSet(true);
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
index 8f5b4e5..79d9fc6 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
@@ -350,14 +350,14 @@ import org.slf4j.LoggerFactory;
           case 1: // SCHEMA_VERSIONS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
-                struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list944.size);
-                SchemaVersionDescriptor _elem945;
-                for (int _i946 = 0; _i946 < _list944.size; ++_i946)
+                org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
+                struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list936.size);
+                SchemaVersionDescriptor _elem937;
+                for (int _i938 = 0; _i938 < _list936.size; ++_i938)
                 {
-                  _elem945 = new SchemaVersionDescriptor();
-                  _elem945.read(iprot);
-                  struct.schemaVersions.add(_elem945);
+                  _elem937 = new SchemaVersionDescriptor();
+                  _elem937.read(iprot);
+                  struct.schemaVersions.add(_elem937);
                 }
                 iprot.readListEnd();
               }
@@ -383,9 +383,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size()));
-          for (SchemaVersionDescriptor _iter947 : struct.schemaVersions)
+          for (SchemaVersionDescriptor _iter939 : struct.schemaVersions)
           {
-            _iter947.write(oprot);
+            _iter939.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -416,9 +416,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSchemaVersions()) {
         {
           oprot.writeI32(struct.schemaVersions.size());
-          for (SchemaVersionDescriptor _iter948 : struct.schemaVersions)
+          for (SchemaVersionDescriptor _iter940 : struct.schemaVersions)
           {
-            _iter948.write(oprot);
+            _iter940.write(oprot);
           }
         }
       }
@@ -430,14 +430,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list949.size);
-          SchemaVersionDescriptor _elem950;
-          for (int _i951 = 0; _i951 < _list949.size; ++_i951)
+          org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list941.size);
+          SchemaVersionDescriptor _elem942;
+          for (int _i943 = 0; _i943 < _list941.size; ++_i943)
           {
-            _elem950 = new SchemaVersionDescriptor();
-            _elem950.read(iprot);
-            struct.schemaVersions.add(_elem950);
+            _elem942 = new SchemaVersionDescriptor();
+            _elem942.read(iprot);
+            struct.schemaVersions.add(_elem942);
           }
         }
         struct.setSchemaVersionsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
index 3510995..0972c5e 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
@@ -38,10 +38,7 @@ import org.slf4j.LoggerFactory;
 @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Materialization implements org.apache.thrift.TBase<Materialization, Materialization._Fields>, java.io.Serializable, Cloneable, Comparable<Materialization> {
   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Materialization");
 
-  private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)1);
-  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
-  private static final org.apache.thrift.protocol.TField INVALIDATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("invalidationTime", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC = new org.apache.thrift.protocol.TField("sourceTablesUpdateDeleteModified", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC = new org.apache.thrift.protocol.TField("sourceTablesUpdateDeleteModified", org.apache.thrift.protocol.TType.BOOL, (short)1);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -49,17 +46,11 @@ import org.slf4j.LoggerFactory;
     schemes.put(TupleScheme.class, new MaterializationTupleSchemeFactory());
   }
 
-  private Set<String> tablesUsed; // required
-  private String validTxnList; // optional
-  private long invalidationTime; // optional
-  private boolean sourceTablesUpdateDeleteModified; // optional
+  private boolean sourceTablesUpdateDeleteModified; // required
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-    TABLES_USED((short)1, "tablesUsed"),
-    VALID_TXN_LIST((short)2, "validTxnList"),
-    INVALIDATION_TIME((short)3, "invalidationTime"),
-    SOURCE_TABLES_UPDATE_DELETE_MODIFIED((short)4, "sourceTablesUpdateDeleteModified");
+    SOURCE_TABLES_UPDATE_DELETE_MODIFIED((short)1, "sourceTablesUpdateDeleteModified");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -74,13 +65,7 @@ import org.slf4j.LoggerFactory;
      */
     public static _Fields findByThriftId(int fieldId) {
       switch(fieldId) {
-        case 1: // TABLES_USED
-          return TABLES_USED;
-        case 2: // VALID_TXN_LIST
-          return VALID_TXN_LIST;
-        case 3: // INVALIDATION_TIME
-          return INVALIDATION_TIME;
-        case 4: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
+        case 1: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
           return SOURCE_TABLES_UPDATE_DELETE_MODIFIED;
         default:
           return null;
@@ -122,21 +107,12 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __INVALIDATIONTIME_ISSET_ID = 0;
-  private static final int __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID = 1;
+  private static final int __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST,_Fields.INVALIDATION_TIME,_Fields.SOURCE_TABLES_UPDATE_DELETE_MODIFIED};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-    tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED, 
-        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
-            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.INVALIDATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("invalidationTime", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
-    tmpMap.put(_Fields.SOURCE_TABLES_UPDATE_DELETE_MODIFIED, new org.apache.thrift.meta_data.FieldMetaData("sourceTablesUpdateDeleteModified", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+    tmpMap.put(_Fields.SOURCE_TABLES_UPDATE_DELETE_MODIFIED, new org.apache.thrift.meta_data.FieldMetaData("sourceTablesUpdateDeleteModified", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Materialization.class, metaDataMap);
@@ -146,10 +122,11 @@ import org.slf4j.LoggerFactory;
   }
 
   public Materialization(
-    Set<String> tablesUsed)
+    boolean sourceTablesUpdateDeleteModified)
   {
     this();
-    this.tablesUsed = tablesUsed;
+    this.sourceTablesUpdateDeleteModified = sourceTablesUpdateDeleteModified;
+    setSourceTablesUpdateDeleteModifiedIsSet(true);
   }
 
   /**
@@ -157,14 +134,6 @@ import org.slf4j.LoggerFactory;
    */
   public Materialization(Materialization other) {
     __isset_bitfield = other.__isset_bitfield;
-    if (other.isSetTablesUsed()) {
-      Set<String> __this__tablesUsed = new HashSet<String>(other.tablesUsed);
-      this.tablesUsed = __this__tablesUsed;
-    }
-    if (other.isSetValidTxnList()) {
-      this.validTxnList = other.validTxnList;
-    }
-    this.invalidationTime = other.invalidationTime;
     this.sourceTablesUpdateDeleteModified = other.sourceTablesUpdateDeleteModified;
   }
 
@@ -174,97 +143,10 @@ import org.slf4j.LoggerFactory;
 
   @Override
   public void clear() {
-    this.tablesUsed = null;
-    this.validTxnList = null;
-    setInvalidationTimeIsSet(false);
-    this.invalidationTime = 0;
     setSourceTablesUpdateDeleteModifiedIsSet(false);
     this.sourceTablesUpdateDeleteModified = false;
   }
 
-  public int getTablesUsedSize() {
-    return (this.tablesUsed == null) ? 0 : this.tablesUsed.size();
-  }
-
-  public java.util.Iterator<String> getTablesUsedIterator() {
-    return (this.tablesUsed == null) ? null : this.tablesUsed.iterator();
-  }
-
-  public void addToTablesUsed(String elem) {
-    if (this.tablesUsed == null) {
-      this.tablesUsed = new HashSet<String>();
-    }
-    this.tablesUsed.add(elem);
-  }
-
-  public Set<String> getTablesUsed() {
-    return this.tablesUsed;
-  }
-
-  public void setTablesUsed(Set<String> tablesUsed) {
-    this.tablesUsed = tablesUsed;
-  }
-
-  public void unsetTablesUsed() {
-    this.tablesUsed = null;
-  }
-
-  /** Returns true if field tablesUsed is set (has been assigned a value) and false otherwise */
-  public boolean isSetTablesUsed() {
-    return this.tablesUsed != null;
-  }
-
-  public void setTablesUsedIsSet(boolean value) {
-    if (!value) {
-      this.tablesUsed = null;
-    }
-  }
-
-  public String getValidTxnList() {
-    return this.validTxnList;
-  }
-
-  public void setValidTxnList(String validTxnList) {
-    this.validTxnList = validTxnList;
-  }
-
-  public void unsetValidTxnList() {
-    this.validTxnList = null;
-  }
-
-  /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
-  public boolean isSetValidTxnList() {
-    return this.validTxnList != null;
-  }
-
-  public void setValidTxnListIsSet(boolean value) {
-    if (!value) {
-      this.validTxnList = null;
-    }
-  }
-
-  public long getInvalidationTime() {
-    return this.invalidationTime;
-  }
-
-  public void setInvalidationTime(long invalidationTime) {
-    this.invalidationTime = invalidationTime;
-    setInvalidationTimeIsSet(true);
-  }
-
-  public void unsetInvalidationTime() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID);
-  }
-
-  /** Returns true if field invalidationTime is set (has been assigned a value) and false otherwise */
-  public boolean isSetInvalidationTime() {
-    return EncodingUtils.testBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID);
-  }
-
-  public void setInvalidationTimeIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID, value);
-  }
-
   public boolean isSourceTablesUpdateDeleteModified() {
     return this.sourceTablesUpdateDeleteModified;
   }
@@ -289,30 +171,6 @@ import org.slf4j.LoggerFactory;
 
   public void setFieldValue(_Fields field, Object value) {
     switch (field) {
-    case TABLES_USED:
-      if (value == null) {
-        unsetTablesUsed();
-      } else {
-        setTablesUsed((Set<String>)value);
-      }
-      break;
-
-    case VALID_TXN_LIST:
-      if (value == null) {
-        unsetValidTxnList();
-      } else {
-        setValidTxnList((String)value);
-      }
-      break;
-
-    case INVALIDATION_TIME:
-      if (value == null) {
-        unsetInvalidationTime();
-      } else {
-        setInvalidationTime((Long)value);
-      }
-      break;
-
     case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
       if (value == null) {
         unsetSourceTablesUpdateDeleteModified();
@@ -326,15 +184,6 @@ import org.slf4j.LoggerFactory;
 
   public Object getFieldValue(_Fields field) {
     switch (field) {
-    case TABLES_USED:
-      return getTablesUsed();
-
-    case VALID_TXN_LIST:
-      return getValidTxnList();
-
-    case INVALIDATION_TIME:
-      return getInvalidationTime();
-
     case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
       return isSourceTablesUpdateDeleteModified();
 
@@ -349,12 +198,6 @@ import org.slf4j.LoggerFactory;
     }
 
     switch (field) {
-    case TABLES_USED:
-      return isSetTablesUsed();
-    case VALID_TXN_LIST:
-      return isSetValidTxnList();
-    case INVALIDATION_TIME:
-      return isSetInvalidationTime();
     case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
       return isSetSourceTablesUpdateDeleteModified();
     }
@@ -374,35 +217,8 @@ import org.slf4j.LoggerFactory;
     if (that == null)
       return false;
 
-    boolean this_present_tablesUsed = true && this.isSetTablesUsed();
-    boolean that_present_tablesUsed = true && that.isSetTablesUsed();
-    if (this_present_tablesUsed || that_present_tablesUsed) {
-      if (!(this_present_tablesUsed && that_present_tablesUsed))
-        return false;
-      if (!this.tablesUsed.equals(that.tablesUsed))
-        return false;
-    }
-
-    boolean this_present_validTxnList = true && this.isSetValidTxnList();
-    boolean that_present_validTxnList = true && that.isSetValidTxnList();
-    if (this_present_validTxnList || that_present_validTxnList) {
-      if (!(this_present_validTxnList && that_present_validTxnList))
-        return false;
-      if (!this.validTxnList.equals(that.validTxnList))
-        return false;
-    }
-
-    boolean this_present_invalidationTime = true && this.isSetInvalidationTime();
-    boolean that_present_invalidationTime = true && that.isSetInvalidationTime();
-    if (this_present_invalidationTime || that_present_invalidationTime) {
-      if (!(this_present_invalidationTime && that_present_invalidationTime))
-        return false;
-      if (this.invalidationTime != that.invalidationTime)
-        return false;
-    }
-
-    boolean this_present_sourceTablesUpdateDeleteModified = true && this.isSetSourceTablesUpdateDeleteModified();
-    boolean that_present_sourceTablesUpdateDeleteModified = true && that.isSetSourceTablesUpdateDeleteModified();
+    boolean this_present_sourceTablesUpdateDeleteModified = true;
+    boolean that_present_sourceTablesUpdateDeleteModified = true;
     if (this_present_sourceTablesUpdateDeleteModified || that_present_sourceTablesUpdateDeleteModified) {
       if (!(this_present_sourceTablesUpdateDeleteModified && that_present_sourceTablesUpdateDeleteModified))
         return false;
@@ -417,22 +233,7 @@ import org.slf4j.LoggerFactory;
   public int hashCode() {
     List<Object> list = new ArrayList<Object>();
 
-    boolean present_tablesUsed = true && (isSetTablesUsed());
-    list.add(present_tablesUsed);
-    if (present_tablesUsed)
-      list.add(tablesUsed);
-
-    boolean present_validTxnList = true && (isSetValidTxnList());
-    list.add(present_validTxnList);
-    if (present_validTxnList)
-      list.add(validTxnList);
-
-    boolean present_invalidationTime = true && (isSetInvalidationTime());
-    list.add(present_invalidationTime);
-    if (present_invalidationTime)
-      list.add(invalidationTime);
-
-    boolean present_sourceTablesUpdateDeleteModified = true && (isSetSourceTablesUpdateDeleteModified());
+    boolean present_sourceTablesUpdateDeleteModified = true;
     list.add(present_sourceTablesUpdateDeleteModified);
     if (present_sourceTablesUpdateDeleteModified)
       list.add(sourceTablesUpdateDeleteModified);
@@ -448,36 +249,6 @@ import org.slf4j.LoggerFactory;
 
     int lastComparison = 0;
 
-    lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTablesUsed()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetValidTxnList()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
-    lastComparison = Boolean.valueOf(isSetInvalidationTime()).compareTo(other.isSetInvalidationTime());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetInvalidationTime()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.invalidationTime, other.invalidationTime);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetSourceTablesUpdateDeleteModified()).compareTo(other.isSetSourceTablesUpdateDeleteModified());
     if (lastComparison != 0) {
       return lastComparison;
@@ -508,43 +279,17 @@ import org.slf4j.LoggerFactory;
     StringBuilder sb = new StringBuilder("Materialization(");
     boolean first = true;
 
-    sb.append("tablesUsed:");
-    if (this.tablesUsed == null) {
-      sb.append("null");
-    } else {
-      sb.append(this.tablesUsed);
-    }
+    sb.append("sourceTablesUpdateDeleteModified:");
+    sb.append(this.sourceTablesUpdateDeleteModified);
     first = false;
-    if (isSetValidTxnList()) {
-      if (!first) sb.append(", ");
-      sb.append("validTxnList:");
-      if (this.validTxnList == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.validTxnList);
-      }
-      first = false;
-    }
-    if (isSetInvalidationTime()) {
-      if (!first) sb.append(", ");
-      sb.append("invalidationTime:");
-      sb.append(this.invalidationTime);
-      first = false;
-    }
-    if (isSetSourceTablesUpdateDeleteModified()) {
-      if (!first) sb.append(", ");
-      sb.append("sourceTablesUpdateDeleteModified:");
-      sb.append(this.sourceTablesUpdateDeleteModified);
-      first = false;
-    }
     sb.append(")");
     return sb.toString();
   }
 
   public void validate() throws org.apache.thrift.TException {
     // check for required fields
-    if (!isSetTablesUsed()) {
-      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablesUsed' is unset! Struct:" + toString());
+    if (!isSetSourceTablesUpdateDeleteModified()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'sourceTablesUpdateDeleteModified' is unset! Struct:" + toString());
     }
 
     // check for sub-struct validity
@@ -586,41 +331,7 @@ import org.slf4j.LoggerFactory;
           break;
         }
         switch (schemeField.id) {
-          case 1: // TABLES_USED
-            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
-              {
-                org.apache.thrift.protocol.TSet _set864 = iprot.readSetBegin();
-                struct.tablesUsed = new HashSet<String>(2*_set864.size);
-                String _elem865;
-                for (int _i866 = 0; _i866 < _set864.size; ++_i866)
-                {
-                  _elem865 = iprot.readString();
-                  struct.tablesUsed.add(_elem865);
-                }
-                iprot.readSetEnd();
-              }
-              struct.setTablesUsedIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 2: // VALID_TXN_LIST
-            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
-              struct.validTxnList = iprot.readString();
-              struct.setValidTxnListIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 3: // INVALIDATION_TIME
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.invalidationTime = iprot.readI64();
-              struct.setInvalidationTimeIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
+          case 1: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
               struct.sourceTablesUpdateDeleteModified = iprot.readBool();
               struct.setSourceTablesUpdateDeleteModifiedIsSet(true);
@@ -641,35 +352,9 @@ import org.slf4j.LoggerFactory;
       struct.validate();
 
       oprot.writeStructBegin(STRUCT_DESC);
-      if (struct.tablesUsed != null) {
-        oprot.writeFieldBegin(TABLES_USED_FIELD_DESC);
-        {
-          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size()));
-          for (String _iter867 : struct.tablesUsed)
-          {
-            oprot.writeString(_iter867);
-          }
-          oprot.writeSetEnd();
-        }
-        oprot.writeFieldEnd();
-      }
-      if (struct.validTxnList != null) {
-        if (struct.isSetValidTxnList()) {
-          oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
-          oprot.writeString(struct.validTxnList);
-          oprot.writeFieldEnd();
-        }
-      }
-      if (struct.isSetInvalidationTime()) {
-        oprot.writeFieldBegin(INVALIDATION_TIME_FIELD_DESC);
-        oprot.writeI64(struct.invalidationTime);
-        oprot.writeFieldEnd();
-      }
-      if (struct.isSetSourceTablesUpdateDeleteModified()) {
-        oprot.writeFieldBegin(SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC);
-        oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
-        oprot.writeFieldEnd();
-      }
+      oprot.writeFieldBegin(SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC);
+      oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
+      oprot.writeFieldEnd();
       oprot.writeFieldStop();
       oprot.writeStructEnd();
     }
@@ -687,62 +372,14 @@ import org.slf4j.LoggerFactory;
     @Override
     public void write(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
-      {
-        oprot.writeI32(struct.tablesUsed.size());
-        for (String _iter868 : struct.tablesUsed)
-        {
-          oprot.writeString(_iter868);
-        }
-      }
-      BitSet optionals = new BitSet();
-      if (struct.isSetValidTxnList()) {
-        optionals.set(0);
-      }
-      if (struct.isSetInvalidationTime()) {
-        optionals.set(1);
-      }
-      if (struct.isSetSourceTablesUpdateDeleteModified()) {
-        optionals.set(2);
-      }
-      oprot.writeBitSet(optionals, 3);
-      if (struct.isSetValidTxnList()) {
-        oprot.writeString(struct.validTxnList);
-      }
-      if (struct.isSetInvalidationTime()) {
-        oprot.writeI64(struct.invalidationTime);
-      }
-      if (struct.isSetSourceTablesUpdateDeleteModified()) {
-        oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
-      }
+      oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
     }
 
     @Override
     public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
-      {
-        org.apache.thrift.protocol.TSet _set869 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.tablesUsed = new HashSet<String>(2*_set869.size);
-        String _elem870;
-        for (int _i871 = 0; _i871 < _set869.size; ++_i871)
-        {
-          _elem870 = iprot.readString();
-          struct.tablesUsed.add(_elem870);
-        }
-      }
-      struct.setTablesUsedIsSet(true);
-      BitSet incoming = iprot.readBitSet(3);
-      if (incoming.get(0)) {
-        struct.validTxnList = iprot.readString();
-        struct.setValidTxnListIsSet(true);
-      }
-      if (incoming.get(1)) {
-        struct.invalidationTime = iprot.readI64();
-        struct.setInvalidationTimeIsSet(true);
-      }
-      if (incoming.get(2)) {
-        struct.sourceTablesUpdateDeleteModified = iprot.readBool();
-        struct.setSourceTablesUpdateDeleteModifiedIsSet(true);
-      }
+      struct.sourceTablesUpdateDeleteModified = iprot.readBool();
+      struct.setSourceTablesUpdateDeleteModifiedIsSet(true);
     }
   }
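
With tablesUsed, validTxnList and invalidationTime removed above, Materialization collapses to a single required flag: callers now only learn whether any source table saw update/delete activity, while the tables used and the transaction list travel on CreationMetadata instead. A small illustrative sketch of the trimmed struct in use; the boolean constructor and isSourceTablesUpdateDeleteModified() are the generated members shown in this diff, the wrapper class is hypothetical.

    import org.apache.hadoop.hive.metastore.api.Materialization;

    public class MaterializationFlagExample {
      public static void main(String[] args) {
        // Required-field constructor generated from the simplified struct.
        Materialization m = new Materialization(false);
        if (m.isSourceTablesUpdateDeleteModified()) {
          System.out.println("source tables had update/delete ops; rewrite may be stale");
        } else {
          System.out.println("no update/delete on source tables since the snapshot");
        }
      }
    }
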
 

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
index 88d7e3f..935af04 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
@@ -1119,14 +1119,14 @@ import org.slf4j.LoggerFactory;
           case 4: // COLS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
-                struct.cols = new ArrayList<FieldSchema>(_list936.size);
-                FieldSchema _elem937;
-                for (int _i938 = 0; _i938 < _list936.size; ++_i938)
+                org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
+                struct.cols = new ArrayList<FieldSchema>(_list928.size);
+                FieldSchema _elem929;
+                for (int _i930 = 0; _i930 < _list928.size; ++_i930)
                 {
-                  _elem937 = new FieldSchema();
-                  _elem937.read(iprot);
-                  struct.cols.add(_elem937);
+                  _elem929 = new FieldSchema();
+                  _elem929.read(iprot);
+                  struct.cols.add(_elem929);
                 }
                 iprot.readListEnd();
               }
@@ -1212,9 +1212,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(COLS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size()));
-          for (FieldSchema _iter939 : struct.cols)
+          for (FieldSchema _iter931 : struct.cols)
           {
-            _iter939.write(oprot);
+            _iter931.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -1323,9 +1323,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCols()) {
         {
           oprot.writeI32(struct.cols.size());
-          for (FieldSchema _iter940 : struct.cols)
+          for (FieldSchema _iter932 : struct.cols)
           {
-            _iter940.write(oprot);
+            _iter932.write(oprot);
           }
         }
       }
@@ -1368,14 +1368,14 @@ import org.slf4j.LoggerFactory;
       }
       if (incoming.get(3)) {
         {
-          org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.cols = new ArrayList<FieldSchema>(_list941.size);
-          FieldSchema _elem942;
-          for (int _i943 = 0; _i943 < _list941.size; ++_i943)
+          org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.cols = new ArrayList<FieldSchema>(_list933.size);
+          FieldSchema _elem934;
+          for (int _i935 = 0; _i935 < _list933.size; ++_i935)
           {
-            _elem942 = new FieldSchema();
-            _elem942.read(iprot);
-            struct.cols.add(_elem942);
+            _elem934 = new FieldSchema();
+            _elem934.read(iprot);
+            struct.cols.add(_elem934);
           }
         }
         struct.setColsIsSet(true);


[48/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/37a1907b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/37a1907b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/37a1907b

Branch: refs/heads/master-txnstats
Commit: 37a1907be4ecff19777a86f8b3d860861613af15
Parents: 1c9947f bac1d98
Author: sergey <se...@apache.org>
Authored: Wed Jul 18 11:51:35 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Wed Jul 18 11:51:35 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   32 +-
 .../apache/hadoop/hive/hbase/HBaseMetaHook.java |   56 +-
 .../src/test/queries/negative/cascade_dbdrop.q  |    4 +-
 .../generatehfiles_require_family_path.q        |    5 +-
 .../src/test/queries/negative/hbase_ddl.q       |    4 +-
 .../positive/drop_database_table_hooks.q        |   57 +
 .../test/queries/positive/external_table_ppd.q  |    5 +-
 .../test/queries/positive/hbase_binary_binary.q |    5 +-
 .../queries/positive/hbase_binary_map_queries.q |   13 +-
 .../positive/hbase_binary_map_queries_prefix.q  |    9 +-
 .../positive/hbase_binary_storage_queries.q     |   12 +-
 .../test/queries/positive/hbase_custom_key.q    |    5 +-
 .../test/queries/positive/hbase_custom_key2.q   |    5 +-
 .../test/queries/positive/hbase_custom_key3.q   |    5 +-
 .../src/test/queries/positive/hbase_ddl.q       |    4 +-
 .../queries/positive/hbase_decimal_decimal.q    |    5 +-
 .../test/queries/positive/hbase_handler_bulk.q  |   11 +-
 .../src/test/queries/positive/hbase_joins.q     |   25 +-
 .../queries/positive/hbase_null_first_col.q     |    5 +-
 .../src/test/queries/positive/hbase_ppd_join.q  |    8 +-
 .../test/queries/positive/hbase_ppd_key_range.q |    5 +-
 .../src/test/queries/positive/hbase_pushdown.q  |    5 +-
 .../src/test/queries/positive/hbase_queries.q   |   54 +-
 .../test/queries/positive/hbase_scan_params.q   |    5 +-
 .../hbase_single_sourced_multi_insert.q         |    5 +-
 .../src/test/queries/positive/hbase_timestamp.q |   20 +-
 .../queries/positive/hbase_timestamp_format.q   |    4 +-
 .../src/test/queries/positive/hbase_viewjoins.q |   10 +-
 .../src/test/queries/positive/hbasestats.q      |    5 +-
 .../src/test/queries/positive/ppd_key_ranges.q  |    5 +-
 .../test/results/negative/cascade_dbdrop.q.out  |    8 +-
 .../generatehfiles_require_family_path.q.out    |    6 +-
 .../src/test/results/negative/hbase_ddl.q.out   |    8 +-
 .../positive/drop_database_table_hooks.q.out    |  258 ++
 .../results/positive/external_table_ppd.q.out   |   14 +-
 .../results/positive/hbase_binary_binary.q.out  |    6 +-
 .../positive/hbase_binary_map_queries.q.out     |   22 +-
 .../hbase_binary_map_queries_prefix.q.out       |   14 +-
 .../positive/hbase_binary_storage_queries.q.out |   28 +-
 .../results/positive/hbase_custom_key.q.out     |    6 +-
 .../results/positive/hbase_custom_key2.q.out    |    6 +-
 .../results/positive/hbase_custom_key3.q.out    |    6 +-
 .../src/test/results/positive/hbase_ddl.q.out   |   16 +-
 .../positive/hbase_decimal_decimal.q.out        |  Bin 1758 -> 1872 bytes
 .../results/positive/hbase_handler_bulk.q.out   |   16 +-
 .../src/test/results/positive/hbase_joins.q.out |   30 +-
 .../results/positive/hbase_null_first_col.q.out |    6 +-
 .../test/results/positive/hbase_ppd_join.q.out  |   12 +-
 .../results/positive/hbase_ppd_key_range.q.out  |    6 +-
 .../test/results/positive/hbase_pushdown.q.out  |    6 +-
 .../test/results/positive/hbase_queries.q.out   |   68 +-
 .../results/positive/hbase_scan_params.q.out    |    6 +-
 .../hbase_single_sourced_multi_insert.q.out     |    6 +-
 .../test/results/positive/hbase_timestamp.q.out |   24 +-
 .../positive/hbase_timestamp_format.q.out       |    8 +-
 .../test/results/positive/hbase_viewjoins.q.out |   20 +-
 .../src/test/results/positive/hbasestats.q.out  |   26 +-
 .../test/results/positive/ppd_key_ranges.q.out  |    6 +-
 .../apache/hive/jdbc/TestRestrictedList.java    |    2 +-
 .../test/resources/testconfiguration.properties |    1 +
 .../hive/llap/io/api/impl/LlapRecordReader.java |   60 +-
 .../UDAFTemplates/VectorUDAFAvg.txt             |  108 +-
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt      |   83 +-
 .../VectorUDAFAvgDecimal64ToDecimal.txt         |  110 +-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |   35 +-
 .../UDAFTemplates/VectorUDAFAvgMerge.txt        |   35 +-
 .../UDAFTemplates/VectorUDAFAvgTimestamp.txt    |  136 +-
 .../UDAFTemplates/VectorUDAFMinMax.txt          |   38 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |   98 +-
 .../VectorUDAFMinMaxIntervalDayTime.txt         |  102 +-
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |   50 +-
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt |  104 +-
 .../UDAFTemplates/VectorUDAFSum.txt             |   34 +-
 .../UDAFTemplates/VectorUDAFVar.txt             |    9 +-
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |   19 +-
 .../UDAFTemplates/VectorUDAFVarMerge.txt        |   60 +-
 .../UDAFTemplates/VectorUDAFVarTimestamp.txt    |   19 +-
 .../hive/ql/exec/MaterializedViewTask.java      |    2 -
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java  |   52 +-
 .../apache/hadoop/hive/ql/exec/tez/TezTask.java |    4 +
 .../ql/exec/vector/VectorAggregationDesc.java   |   19 +-
 .../aggregates/VectorAggregateExpression.java   |    2 +-
 .../aggregates/VectorUDAFCountMerge.java        |   34 +-
 .../aggregates/VectorUDAFSumDecimal.java        |   37 +-
 .../aggregates/VectorUDAFSumDecimal64.java      |   34 +-
 .../VectorUDAFSumDecimal64ToDecimal.java        |   34 +-
 .../aggregates/VectorUDAFSumTimestamp.java      |   34 +-
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |    1 -
 .../hadoop/hive/ql/log/LogDivertAppender.java   |    6 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  122 +-
 .../hadoop/hive/ql/metadata/TableIterable.java  |  104 -
 .../hive/ql/optimizer/physical/Vectorizer.java  |   54 +-
 .../apache/hadoop/hive/ql/parse/ASTNode.java    |   31 +-
 .../hadoop/hive/ql/parse/ASTNodeOrigin.java     |    4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   64 +-
 .../hive/ql/parse/ImmutableCommonToken.java     |  107 +
 .../hadoop/hive/ql/parse/ParseDriver.java       |   16 +-
 .../org/apache/hadoop/hive/ql/parse/QBExpr.java |    5 +-
 .../hadoop/hive/ql/parse/QBParseInfo.java       |    5 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |   21 +-
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |   11 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |  444 ++-
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |    4 +-
 .../hive/ql/ppd/ExprWalkerProcFactory.java      |   30 +-
 .../hive/ql/ppd/SyntheticJoinPredicate.java     |  174 +-
 .../hive/ql/udf/generic/GenericUDAFAverage.java |   21 +
 .../ql/udf/generic/GenericUDAFVariance.java     |   19 +-
 .../hive/ql/exec/repl/ReplDumpTaskTest.java     |  126 +
 .../exec/vector/TestVectorGroupByOperator.java  |   52 +-
 .../ql/exec/vector/VectorRandomBatchSource.java |   51 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |  253 +-
 .../vector/aggregation/AggregationBase.java     |  473 +++
 .../aggregation/TestVectorAggregation.java      |  664 ++++
 .../expressions/TestVectorDateAddSub.java       |    2 +
 .../vector/expressions/TestVectorDateDiff.java  |    2 +
 .../expressions/TestVectorIfStatement.java      |    4 +
 .../vector/expressions/TestVectorNegative.java  |    5 +
 .../expressions/TestVectorStringConcat.java     |    4 +
 .../expressions/TestVectorStringUnary.java      |    2 +
 .../vector/expressions/TestVectorSubStr.java    |    2 +
 .../expressions/TestVectorTimestampExtract.java |    4 +
 .../hive/ql/metadata/TestTableIterable.java     |   67 -
 .../ql/optimizer/physical/TestVectorizer.java   |    5 +-
 .../dynamic_semijoin_reduction_sw2.q            |   59 +
 ...terialized_view_create_rewrite_time_window.q |    4 +-
 .../clientpositive/ppd_deterministic_expr.q     |  143 +
 .../test/queries/clientpositive/ppd_udf_col.q   |   48 +
 .../clientpositive/druid/druidmini_mv.q.out     |   85 +-
 .../clientpositive/llap/check_constraint.q.out  |   17 +-
 .../llap/dynamic_semijoin_reduction_sw2.q.out   |  450 +++
 .../llap/enforce_constraint_notnull.q.out       |   17 +-
 .../clientpositive/llap/explainuser_1.q.out     |   12 +-
 .../results/clientpositive/llap/lineage3.q.out  |    2 +-
 .../materialized_view_create_rewrite_5.q.out    |    4 +-
 ...alized_view_create_rewrite_time_window.q.out |   16 +-
 .../llap/materialized_view_rewrite_empty.q.out  |    4 +-
 .../llap/orc_ppd_schema_evol_3a.q.out           |   52 +-
 .../clientpositive/llap/subquery_in.q.out       |   22 +-
 .../clientpositive/llap/subquery_notin.q.out    |   68 +-
 .../llap/tez_fixed_bucket_pruning.q.out         |    8 +-
 .../clientpositive/masking_disablecbo_2.q.out   |  219 +-
 .../clientpositive/perf/tez/query1.q.out        |   76 +-
 .../clientpositive/perf/tez/query16.q.out       |  118 +-
 .../clientpositive/perf/tez/query17.q.out       |  197 +-
 .../clientpositive/perf/tez/query18.q.out       |  124 +-
 .../clientpositive/perf/tez/query2.q.out        |  116 +-
 .../clientpositive/perf/tez/query23.q.out       |  444 +--
 .../clientpositive/perf/tez/query24.q.out       |  252 +-
 .../clientpositive/perf/tez/query25.q.out       |  188 +-
 .../clientpositive/perf/tez/query29.q.out       |  148 +-
 .../clientpositive/perf/tez/query31.q.out       |  322 +-
 .../clientpositive/perf/tez/query32.q.out       |  140 +-
 .../clientpositive/perf/tez/query39.q.out       |   94 +-
 .../clientpositive/perf/tez/query40.q.out       |   92 +-
 .../clientpositive/perf/tez/query54.q.out       |  246 +-
 .../clientpositive/perf/tez/query59.q.out       |  134 +-
 .../clientpositive/perf/tez/query64.q.out       |  760 ++---
 .../clientpositive/perf/tez/query69.q.out       |  144 +-
 .../clientpositive/perf/tez/query72.q.out       |  178 +-
 .../clientpositive/perf/tez/query77.q.out       |  248 +-
 .../clientpositive/perf/tez/query78.q.out       |  136 +-
 .../clientpositive/perf/tez/query8.q.out        |  116 +-
 .../clientpositive/perf/tez/query80.q.out       |  336 +-
 .../clientpositive/perf/tez/query91.q.out       |   74 +-
 .../clientpositive/perf/tez/query92.q.out       |  174 +-
 .../clientpositive/perf/tez/query94.q.out       |  118 +-
 .../clientpositive/perf/tez/query95.q.out       |  241 +-
 .../clientpositive/ppd_deterministic_expr.q.out |  553 ++++
 .../results/clientpositive/ppd_udf_col.q.out    |  409 +++
 .../spark_dynamic_partition_pruning_3.q.out     |    3 +-
 .../results/clientpositive/union_offcbo.q.out   |   34 +-
 .../cli/operation/GetColumnsOperation.java      |    2 +-
 .../apache/hive/service/server/HiveServer2.java |   35 +-
 .../metastore/api/AlterPartitionsRequest.java   |   36 +-
 .../hive/metastore/api/CreationMetadata.java    |  111 +-
 .../metastore/api/FindSchemasByColsResp.java    |   36 +-
 .../hive/metastore/api/Materialization.java     |  409 +--
 .../hive/metastore/api/SchemaVersion.java       |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2858 +++++++++---------
 .../hive/metastore/api/WMFullResourcePlan.java  |  144 +-
 .../api/WMGetAllResourcePlanResponse.java       |   36 +-
 .../WMGetTriggersForResourePlanResponse.java    |   36 +-
 .../api/WMValidateResourcePlanResponse.java     |   64 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1510 +++++----
 .../src/gen/thrift/gen-php/metastore/Types.php  |  344 +--
 .../hive_metastore/ThriftHiveMetastore-remote   |    4 +-
 .../hive_metastore/ThriftHiveMetastore.py       | 1015 +++----
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  222 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   16 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   20 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   11 +-
 .../hive/metastore/HiveMetaStoreClient.java     |  124 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    2 +-
 .../MaterializationsCacheCleanerTask.java       |   63 -
 .../MaterializationsInvalidationCache.java      |  543 ----
 .../MaterializationsRebuildLockCleanerTask.java |   30 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   20 +-
 .../hadoop/hive/metastore/TableIterable.java    |  115 +
 .../hive/metastore/conf/MetastoreConf.java      |    6 +-
 .../hive/metastore/model/MCreationMetadata.java |   16 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   13 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  309 +-
 .../hadoop/hive/metastore/txn/TxnStore.java     |   37 +-
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   14 +-
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   14 +-
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   19 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   17 +-
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   18 +-
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   19 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   12 +-
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   12 +-
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   20 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   14 +-
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   14 +-
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |   19 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   14 +-
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   26 +-
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |   19 +
 .../src/main/thrift/hive_metastore.thrift       |    8 +-
 .../HiveMetaStoreClientPreCatalog.java          |    7 +-
 ...stMetaStoreMaterializationsCacheCleaner.java |  328 --
 .../hive/metastore/TestTableIterable.java       |   76 +
 .../TestTablesCreateDropAlterTruncate.java      |    1 +
 .../ql/exec/vector/TimestampColumnVector.java   |    2 +-
 .../ql/exec/vector/TestStructColumnVector.java  |    1 +
 .../apache/hive/streaming/TestStreaming.java    |   21 +-
 226 files changed, 12148 insertions(+), 9560 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index 9d0e014,0000000..45832a4
mode 100644,000000..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@@ -1,1178 -1,0 +1,1178 @@@
 +/**
 + * Autogenerated by Thrift Compiler (0.9.3)
 + *
 + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 + *  @generated
 + */
 +package org.apache.hadoop.hive.metastore.api;
 +
 +import org.apache.thrift.scheme.IScheme;
 +import org.apache.thrift.scheme.SchemeFactory;
 +import org.apache.thrift.scheme.StandardScheme;
 +
 +import org.apache.thrift.scheme.TupleScheme;
 +import org.apache.thrift.protocol.TTupleProtocol;
 +import org.apache.thrift.protocol.TProtocolException;
 +import org.apache.thrift.EncodingUtils;
 +import org.apache.thrift.TException;
 +import org.apache.thrift.async.AsyncMethodCallback;
 +import org.apache.thrift.server.AbstractNonblockingServer.*;
 +import java.util.List;
 +import java.util.ArrayList;
 +import java.util.Map;
 +import java.util.HashMap;
 +import java.util.EnumMap;
 +import java.util.Set;
 +import java.util.HashSet;
 +import java.util.EnumSet;
 +import java.util.Collections;
 +import java.util.BitSet;
 +import java.nio.ByteBuffer;
 +import java.util.Arrays;
 +import javax.annotation.Generated;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
 +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
 +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsRequest implements org.apache.thrift.TBase<AlterPartitionsRequest, AlterPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterPartitionsRequest> {
 +  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsRequest");
 +
 +  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
 +  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
 +  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
 +  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)4);
 +  private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)5);
 +  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
 +  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)7);
 +  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
 +
 +  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
 +  static {
 +    schemes.put(StandardScheme.class, new AlterPartitionsRequestStandardSchemeFactory());
 +    schemes.put(TupleScheme.class, new AlterPartitionsRequestTupleSchemeFactory());
 +  }
 +
 +  private String catName; // optional
 +  private String dbName; // required
 +  private String tableName; // required
 +  private List<Partition> partitions; // required
 +  private EnvironmentContext environmentContext; // optional
 +  private long txnId; // optional
 +  private long writeId; // optional
 +  private String validWriteIdList; // optional
 +
 +  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
 +  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
 +    CAT_NAME((short)1, "catName"),
 +    DB_NAME((short)2, "dbName"),
 +    TABLE_NAME((short)3, "tableName"),
 +    PARTITIONS((short)4, "partitions"),
 +    ENVIRONMENT_CONTEXT((short)5, "environmentContext"),
 +    TXN_ID((short)6, "txnId"),
 +    WRITE_ID((short)7, "writeId"),
 +    VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
 +
 +    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 +
 +    static {
 +      for (_Fields field : EnumSet.allOf(_Fields.class)) {
 +        byName.put(field.getFieldName(), field);
 +      }
 +    }
 +
 +    /**
 +     * Find the _Fields constant that matches fieldId, or null if its not found.
 +     */
 +    public static _Fields findByThriftId(int fieldId) {
 +      switch(fieldId) {
 +        case 1: // CAT_NAME
 +          return CAT_NAME;
 +        case 2: // DB_NAME
 +          return DB_NAME;
 +        case 3: // TABLE_NAME
 +          return TABLE_NAME;
 +        case 4: // PARTITIONS
 +          return PARTITIONS;
 +        case 5: // ENVIRONMENT_CONTEXT
 +          return ENVIRONMENT_CONTEXT;
 +        case 6: // TXN_ID
 +          return TXN_ID;
 +        case 7: // WRITE_ID
 +          return WRITE_ID;
 +        case 8: // VALID_WRITE_ID_LIST
 +          return VALID_WRITE_ID_LIST;
 +        default:
 +          return null;
 +      }
 +    }
 +
 +    /**
 +     * Find the _Fields constant that matches fieldId, throwing an exception
 +     * if it is not found.
 +     */
 +    public static _Fields findByThriftIdOrThrow(int fieldId) {
 +      _Fields fields = findByThriftId(fieldId);
 +      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
 +      return fields;
 +    }
 +
 +    /**
 +     * Find the _Fields constant that matches name, or null if its not found.
 +     */
 +    public static _Fields findByName(String name) {
 +      return byName.get(name);
 +    }
 +
 +    private final short _thriftId;
 +    private final String _fieldName;
 +
 +    _Fields(short thriftId, String fieldName) {
 +      _thriftId = thriftId;
 +      _fieldName = fieldName;
 +    }
 +
 +    public short getThriftFieldId() {
 +      return _thriftId;
 +    }
 +
 +    public String getFieldName() {
 +      return _fieldName;
 +    }
 +  }
 +
 +  // isset id assignments
 +  private static final int __TXNID_ISSET_ID = 0;
 +  private static final int __WRITEID_ISSET_ID = 1;
 +  private byte __isset_bitfield = 0;
 +  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.ENVIRONMENT_CONTEXT,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
 +  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
 +  static {
 +    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
 +    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
 +    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
 +    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
 +    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
 +        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
 +            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
 +    tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 +        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
 +    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
 +    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
 +    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
 +        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
 +    metaDataMap = Collections.unmodifiableMap(tmpMap);
 +    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsRequest.class, metaDataMap);
 +  }
 +
 +  public AlterPartitionsRequest() {
 +    this.txnId = -1L;
 +
 +    this.writeId = -1L;
 +
 +  }
 +
 +  public AlterPartitionsRequest(
 +    String dbName,
 +    String tableName,
 +    List<Partition> partitions)
 +  {
 +    this();
 +    this.dbName = dbName;
 +    this.tableName = tableName;
 +    this.partitions = partitions;
 +  }
 +
 +  /**
 +   * Performs a deep copy on <i>other</i>.
 +   */
 +  public AlterPartitionsRequest(AlterPartitionsRequest other) {
 +    __isset_bitfield = other.__isset_bitfield;
 +    if (other.isSetCatName()) {
 +      this.catName = other.catName;
 +    }
 +    if (other.isSetDbName()) {
 +      this.dbName = other.dbName;
 +    }
 +    if (other.isSetTableName()) {
 +      this.tableName = other.tableName;
 +    }
 +    if (other.isSetPartitions()) {
 +      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
 +      for (Partition other_element : other.partitions) {
 +        __this__partitions.add(new Partition(other_element));
 +      }
 +      this.partitions = __this__partitions;
 +    }
 +    if (other.isSetEnvironmentContext()) {
 +      this.environmentContext = new EnvironmentContext(other.environmentContext);
 +    }
 +    this.txnId = other.txnId;
 +    this.writeId = other.writeId;
 +    if (other.isSetValidWriteIdList()) {
 +      this.validWriteIdList = other.validWriteIdList;
 +    }
 +  }
 +
 +  public AlterPartitionsRequest deepCopy() {
 +    return new AlterPartitionsRequest(this);
 +  }
 +
 +  @Override
 +  public void clear() {
 +    this.catName = null;
 +    this.dbName = null;
 +    this.tableName = null;
 +    this.partitions = null;
 +    this.environmentContext = null;
 +    this.txnId = -1L;
 +
 +    this.writeId = -1L;
 +
 +    this.validWriteIdList = null;
 +  }
 +
 +  public String getCatName() {
 +    return this.catName;
 +  }
 +
 +  public void setCatName(String catName) {
 +    this.catName = catName;
 +  }
 +
 +  public void unsetCatName() {
 +    this.catName = null;
 +  }
 +
 +  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
 +  public boolean isSetCatName() {
 +    return this.catName != null;
 +  }
 +
 +  public void setCatNameIsSet(boolean value) {
 +    if (!value) {
 +      this.catName = null;
 +    }
 +  }
 +
 +  public String getDbName() {
 +    return this.dbName;
 +  }
 +
 +  public void setDbName(String dbName) {
 +    this.dbName = dbName;
 +  }
 +
 +  public void unsetDbName() {
 +    this.dbName = null;
 +  }
 +
 +  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
 +  public boolean isSetDbName() {
 +    return this.dbName != null;
 +  }
 +
 +  public void setDbNameIsSet(boolean value) {
 +    if (!value) {
 +      this.dbName = null;
 +    }
 +  }
 +
 +  public String getTableName() {
 +    return this.tableName;
 +  }
 +
 +  public void setTableName(String tableName) {
 +    this.tableName = tableName;
 +  }
 +
 +  public void unsetTableName() {
 +    this.tableName = null;
 +  }
 +
 +  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
 +  public boolean isSetTableName() {
 +    return this.tableName != null;
 +  }
 +
 +  public void setTableNameIsSet(boolean value) {
 +    if (!value) {
 +      this.tableName = null;
 +    }
 +  }
 +
 +  public int getPartitionsSize() {
 +    return (this.partitions == null) ? 0 : this.partitions.size();
 +  }
 +
 +  public java.util.Iterator<Partition> getPartitionsIterator() {
 +    return (this.partitions == null) ? null : this.partitions.iterator();
 +  }
 +
 +  public void addToPartitions(Partition elem) {
 +    if (this.partitions == null) {
 +      this.partitions = new ArrayList<Partition>();
 +    }
 +    this.partitions.add(elem);
 +  }
 +
 +  public List<Partition> getPartitions() {
 +    return this.partitions;
 +  }
 +
 +  public void setPartitions(List<Partition> partitions) {
 +    this.partitions = partitions;
 +  }
 +
 +  public void unsetPartitions() {
 +    this.partitions = null;
 +  }
 +
 +  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
 +  public boolean isSetPartitions() {
 +    return this.partitions != null;
 +  }
 +
 +  public void setPartitionsIsSet(boolean value) {
 +    if (!value) {
 +      this.partitions = null;
 +    }
 +  }
 +
 +  public EnvironmentContext getEnvironmentContext() {
 +    return this.environmentContext;
 +  }
 +
 +  public void setEnvironmentContext(EnvironmentContext environmentContext) {
 +    this.environmentContext = environmentContext;
 +  }
 +
 +  public void unsetEnvironmentContext() {
 +    this.environmentContext = null;
 +  }
 +
 +  /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */
 +  public boolean isSetEnvironmentContext() {
 +    return this.environmentContext != null;
 +  }
 +
 +  public void setEnvironmentContextIsSet(boolean value) {
 +    if (!value) {
 +      this.environmentContext = null;
 +    }
 +  }
 +
 +  public long getTxnId() {
 +    return this.txnId;
 +  }
 +
 +  public void setTxnId(long txnId) {
 +    this.txnId = txnId;
 +    setTxnIdIsSet(true);
 +  }
 +
 +  public void unsetTxnId() {
 +    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
 +  }
 +
 +  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
 +  public boolean isSetTxnId() {
 +    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
 +  }
 +
 +  public void setTxnIdIsSet(boolean value) {
 +    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
 +  }
 +
 +  public long getWriteId() {
 +    return this.writeId;
 +  }
 +
 +  public void setWriteId(long writeId) {
 +    this.writeId = writeId;
 +    setWriteIdIsSet(true);
 +  }
 +
 +  public void unsetWriteId() {
 +    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
 +  }
 +
 +  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
 +  public boolean isSetWriteId() {
 +    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
 +  }
 +
 +  public void setWriteIdIsSet(boolean value) {
 +    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
 +  }
 +
 +  public String getValidWriteIdList() {
 +    return this.validWriteIdList;
 +  }
 +
 +  public void setValidWriteIdList(String validWriteIdList) {
 +    this.validWriteIdList = validWriteIdList;
 +  }
 +
 +  public void unsetValidWriteIdList() {
 +    this.validWriteIdList = null;
 +  }
 +
 +  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
 +  public boolean isSetValidWriteIdList() {
 +    return this.validWriteIdList != null;
 +  }
 +
 +  public void setValidWriteIdListIsSet(boolean value) {
 +    if (!value) {
 +      this.validWriteIdList = null;
 +    }
 +  }
 +
 +  public void setFieldValue(_Fields field, Object value) {
 +    switch (field) {
 +    case CAT_NAME:
 +      if (value == null) {
 +        unsetCatName();
 +      } else {
 +        setCatName((String)value);
 +      }
 +      break;
 +
 +    case DB_NAME:
 +      if (value == null) {
 +        unsetDbName();
 +      } else {
 +        setDbName((String)value);
 +      }
 +      break;
 +
 +    case TABLE_NAME:
 +      if (value == null) {
 +        unsetTableName();
 +      } else {
 +        setTableName((String)value);
 +      }
 +      break;
 +
 +    case PARTITIONS:
 +      if (value == null) {
 +        unsetPartitions();
 +      } else {
 +        setPartitions((List<Partition>)value);
 +      }
 +      break;
 +
 +    case ENVIRONMENT_CONTEXT:
 +      if (value == null) {
 +        unsetEnvironmentContext();
 +      } else {
 +        setEnvironmentContext((EnvironmentContext)value);
 +      }
 +      break;
 +
 +    case TXN_ID:
 +      if (value == null) {
 +        unsetTxnId();
 +      } else {
 +        setTxnId((Long)value);
 +      }
 +      break;
 +
 +    case WRITE_ID:
 +      if (value == null) {
 +        unsetWriteId();
 +      } else {
 +        setWriteId((Long)value);
 +      }
 +      break;
 +
 +    case VALID_WRITE_ID_LIST:
 +      if (value == null) {
 +        unsetValidWriteIdList();
 +      } else {
 +        setValidWriteIdList((String)value);
 +      }
 +      break;
 +
 +    }
 +  }
 +
 +  public Object getFieldValue(_Fields field) {
 +    switch (field) {
 +    case CAT_NAME:
 +      return getCatName();
 +
 +    case DB_NAME:
 +      return getDbName();
 +
 +    case TABLE_NAME:
 +      return getTableName();
 +
 +    case PARTITIONS:
 +      return getPartitions();
 +
 +    case ENVIRONMENT_CONTEXT:
 +      return getEnvironmentContext();
 +
 +    case TXN_ID:
 +      return getTxnId();
 +
 +    case WRITE_ID:
 +      return getWriteId();
 +
 +    case VALID_WRITE_ID_LIST:
 +      return getValidWriteIdList();
 +
 +    }
 +    throw new IllegalStateException();
 +  }
 +
 +  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
 +  public boolean isSet(_Fields field) {
 +    if (field == null) {
 +      throw new IllegalArgumentException();
 +    }
 +
 +    switch (field) {
 +    case CAT_NAME:
 +      return isSetCatName();
 +    case DB_NAME:
 +      return isSetDbName();
 +    case TABLE_NAME:
 +      return isSetTableName();
 +    case PARTITIONS:
 +      return isSetPartitions();
 +    case ENVIRONMENT_CONTEXT:
 +      return isSetEnvironmentContext();
 +    case TXN_ID:
 +      return isSetTxnId();
 +    case WRITE_ID:
 +      return isSetWriteId();
 +    case VALID_WRITE_ID_LIST:
 +      return isSetValidWriteIdList();
 +    }
 +    throw new IllegalStateException();
 +  }
 +
 +  @Override
 +  public boolean equals(Object that) {
 +    if (that == null)
 +      return false;
 +    if (that instanceof AlterPartitionsRequest)
 +      return this.equals((AlterPartitionsRequest)that);
 +    return false;
 +  }
 +
 +  public boolean equals(AlterPartitionsRequest that) {
 +    if (that == null)
 +      return false;
 +
 +    boolean this_present_catName = true && this.isSetCatName();
 +    boolean that_present_catName = true && that.isSetCatName();
 +    if (this_present_catName || that_present_catName) {
 +      if (!(this_present_catName && that_present_catName))
 +        return false;
 +      if (!this.catName.equals(that.catName))
 +        return false;
 +    }
 +
 +    boolean this_present_dbName = true && this.isSetDbName();
 +    boolean that_present_dbName = true && that.isSetDbName();
 +    if (this_present_dbName || that_present_dbName) {
 +      if (!(this_present_dbName && that_present_dbName))
 +        return false;
 +      if (!this.dbName.equals(that.dbName))
 +        return false;
 +    }
 +
 +    boolean this_present_tableName = true && this.isSetTableName();
 +    boolean that_present_tableName = true && that.isSetTableName();
 +    if (this_present_tableName || that_present_tableName) {
 +      if (!(this_present_tableName && that_present_tableName))
 +        return false;
 +      if (!this.tableName.equals(that.tableName))
 +        return false;
 +    }
 +
 +    boolean this_present_partitions = true && this.isSetPartitions();
 +    boolean that_present_partitions = true && that.isSetPartitions();
 +    if (this_present_partitions || that_present_partitions) {
 +      if (!(this_present_partitions && that_present_partitions))
 +        return false;
 +      if (!this.partitions.equals(that.partitions))
 +        return false;
 +    }
 +
 +    boolean this_present_environmentContext = true && this.isSetEnvironmentContext();
 +    boolean that_present_environmentContext = true && that.isSetEnvironmentContext();
 +    if (this_present_environmentContext || that_present_environmentContext) {
 +      if (!(this_present_environmentContext && that_present_environmentContext))
 +        return false;
 +      if (!this.environmentContext.equals(that.environmentContext))
 +        return false;
 +    }
 +
 +    boolean this_present_txnId = true && this.isSetTxnId();
 +    boolean that_present_txnId = true && that.isSetTxnId();
 +    if (this_present_txnId || that_present_txnId) {
 +      if (!(this_present_txnId && that_present_txnId))
 +        return false;
 +      if (this.txnId != that.txnId)
 +        return false;
 +    }
 +
 +    boolean this_present_writeId = true && this.isSetWriteId();
 +    boolean that_present_writeId = true && that.isSetWriteId();
 +    if (this_present_writeId || that_present_writeId) {
 +      if (!(this_present_writeId && that_present_writeId))
 +        return false;
 +      if (this.writeId != that.writeId)
 +        return false;
 +    }
 +
 +    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
 +    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
 +    if (this_present_validWriteIdList || that_present_validWriteIdList) {
 +      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
 +        return false;
 +      if (!this.validWriteIdList.equals(that.validWriteIdList))
 +        return false;
 +    }
 +
 +    return true;
 +  }
 +
 +  @Override
 +  public int hashCode() {
 +    List<Object> list = new ArrayList<Object>();
 +
 +    boolean present_catName = true && (isSetCatName());
 +    list.add(present_catName);
 +    if (present_catName)
 +      list.add(catName);
 +
 +    boolean present_dbName = true && (isSetDbName());
 +    list.add(present_dbName);
 +    if (present_dbName)
 +      list.add(dbName);
 +
 +    boolean present_tableName = true && (isSetTableName());
 +    list.add(present_tableName);
 +    if (present_tableName)
 +      list.add(tableName);
 +
 +    boolean present_partitions = true && (isSetPartitions());
 +    list.add(present_partitions);
 +    if (present_partitions)
 +      list.add(partitions);
 +
 +    boolean present_environmentContext = true && (isSetEnvironmentContext());
 +    list.add(present_environmentContext);
 +    if (present_environmentContext)
 +      list.add(environmentContext);
 +
 +    boolean present_txnId = true && (isSetTxnId());
 +    list.add(present_txnId);
 +    if (present_txnId)
 +      list.add(txnId);
 +
 +    boolean present_writeId = true && (isSetWriteId());
 +    list.add(present_writeId);
 +    if (present_writeId)
 +      list.add(writeId);
 +
 +    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
 +    list.add(present_validWriteIdList);
 +    if (present_validWriteIdList)
 +      list.add(validWriteIdList);
 +
 +    return list.hashCode();
 +  }
 +
 +  @Override
 +  public int compareTo(AlterPartitionsRequest other) {
 +    if (!getClass().equals(other.getClass())) {
 +      return getClass().getName().compareTo(other.getClass().getName());
 +    }
 +
 +    int lastComparison = 0;
 +
 +    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetCatName()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetDbName()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetTableName()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetPartitions()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetEnvironmentContext()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetTxnId()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetWriteId()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
 +    if (lastComparison != 0) {
 +      return lastComparison;
 +    }
 +    if (isSetValidWriteIdList()) {
 +      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
 +      if (lastComparison != 0) {
 +        return lastComparison;
 +      }
 +    }
 +    return 0;
 +  }
 +
 +  public _Fields fieldForId(int fieldId) {
 +    return _Fields.findByThriftId(fieldId);
 +  }
 +
 +  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
 +    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
 +  }
 +
 +  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
 +    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
 +  }
 +
 +  @Override
 +  public String toString() {
 +    StringBuilder sb = new StringBuilder("AlterPartitionsRequest(");
 +    boolean first = true;
 +
 +    if (isSetCatName()) {
 +      sb.append("catName:");
 +      if (this.catName == null) {
 +        sb.append("null");
 +      } else {
 +        sb.append(this.catName);
 +      }
 +      first = false;
 +    }
 +    if (!first) sb.append(", ");
 +    sb.append("dbName:");
 +    if (this.dbName == null) {
 +      sb.append("null");
 +    } else {
 +      sb.append(this.dbName);
 +    }
 +    first = false;
 +    if (!first) sb.append(", ");
 +    sb.append("tableName:");
 +    if (this.tableName == null) {
 +      sb.append("null");
 +    } else {
 +      sb.append(this.tableName);
 +    }
 +    first = false;
 +    if (!first) sb.append(", ");
 +    sb.append("partitions:");
 +    if (this.partitions == null) {
 +      sb.append("null");
 +    } else {
 +      sb.append(this.partitions);
 +    }
 +    first = false;
 +    if (isSetEnvironmentContext()) {
 +      if (!first) sb.append(", ");
 +      sb.append("environmentContext:");
 +      if (this.environmentContext == null) {
 +        sb.append("null");
 +      } else {
 +        sb.append(this.environmentContext);
 +      }
 +      first = false;
 +    }
 +    if (isSetTxnId()) {
 +      if (!first) sb.append(", ");
 +      sb.append("txnId:");
 +      sb.append(this.txnId);
 +      first = false;
 +    }
 +    if (isSetWriteId()) {
 +      if (!first) sb.append(", ");
 +      sb.append("writeId:");
 +      sb.append(this.writeId);
 +      first = false;
 +    }
 +    if (isSetValidWriteIdList()) {
 +      if (!first) sb.append(", ");
 +      sb.append("validWriteIdList:");
 +      if (this.validWriteIdList == null) {
 +        sb.append("null");
 +      } else {
 +        sb.append(this.validWriteIdList);
 +      }
 +      first = false;
 +    }
 +    sb.append(")");
 +    return sb.toString();
 +  }
 +
 +  public void validate() throws org.apache.thrift.TException {
 +    // check for required fields
 +    if (!isSetDbName()) {
 +      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
 +    }
 +
 +    if (!isSetTableName()) {
 +      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
 +    }
 +
 +    if (!isSetPartitions()) {
 +      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString());
 +    }
 +
 +    // check for sub-struct validity
 +    if (environmentContext != null) {
 +      environmentContext.validate();
 +    }
 +  }
 +
 +  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
 +    try {
 +      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
 +    } catch (org.apache.thrift.TException te) {
 +      throw new java.io.IOException(te);
 +    }
 +  }
 +
 +  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
 +    try {
 +      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
 +      __isset_bitfield = 0;
 +      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
 +    } catch (org.apache.thrift.TException te) {
 +      throw new java.io.IOException(te);
 +    }
 +  }
 +
 +  private static class AlterPartitionsRequestStandardSchemeFactory implements SchemeFactory {
 +    public AlterPartitionsRequestStandardScheme getScheme() {
 +      return new AlterPartitionsRequestStandardScheme();
 +    }
 +  }
 +
 +  private static class AlterPartitionsRequestStandardScheme extends StandardScheme<AlterPartitionsRequest> {
 +
 +    public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
 +      org.apache.thrift.protocol.TField schemeField;
 +      iprot.readStructBegin();
 +      while (true)
 +      {
 +        schemeField = iprot.readFieldBegin();
 +        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
 +          break;
 +        }
 +        switch (schemeField.id) {
 +          case 1: // CAT_NAME
 +            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 +              struct.catName = iprot.readString();
 +              struct.setCatNameIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 2: // DB_NAME
 +            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 +              struct.dbName = iprot.readString();
 +              struct.setDbNameIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 3: // TABLE_NAME
 +            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 +              struct.tableName = iprot.readString();
 +              struct.setTableNameIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 4: // PARTITIONS
 +            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 +              {
-                 org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                 struct.partitions = new ArrayList<Partition>(_list960.size);
-                 Partition _elem961;
-                 for (int _i962 = 0; _i962 < _list960.size; ++_i962)
++                org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
++                struct.partitions = new ArrayList<Partition>(_list952.size);
++                Partition _elem953;
++                for (int _i954 = 0; _i954 < _list952.size; ++_i954)
 +                {
-                   _elem961 = new Partition();
-                   _elem961.read(iprot);
-                   struct.partitions.add(_elem961);
++                  _elem953 = new Partition();
++                  _elem953.read(iprot);
++                  struct.partitions.add(_elem953);
 +                }
 +                iprot.readListEnd();
 +              }
 +              struct.setPartitionsIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 5: // ENVIRONMENT_CONTEXT
 +            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
 +              struct.environmentContext = new EnvironmentContext();
 +              struct.environmentContext.read(iprot);
 +              struct.setEnvironmentContextIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 6: // TXN_ID
 +            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
 +              struct.txnId = iprot.readI64();
 +              struct.setTxnIdIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 7: // WRITE_ID
 +            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
 +              struct.writeId = iprot.readI64();
 +              struct.setWriteIdIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          case 8: // VALID_WRITE_ID_LIST
 +            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 +              struct.validWriteIdList = iprot.readString();
 +              struct.setValidWriteIdListIsSet(true);
 +            } else { 
 +              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +            }
 +            break;
 +          default:
 +            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +        }
 +        iprot.readFieldEnd();
 +      }
 +      iprot.readStructEnd();
 +      struct.validate();
 +    }
 +
 +    public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
 +      struct.validate();
 +
 +      oprot.writeStructBegin(STRUCT_DESC);
 +      if (struct.catName != null) {
 +        if (struct.isSetCatName()) {
 +          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
 +          oprot.writeString(struct.catName);
 +          oprot.writeFieldEnd();
 +        }
 +      }
 +      if (struct.dbName != null) {
 +        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
 +        oprot.writeString(struct.dbName);
 +        oprot.writeFieldEnd();
 +      }
 +      if (struct.tableName != null) {
 +        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
 +        oprot.writeString(struct.tableName);
 +        oprot.writeFieldEnd();
 +      }
 +      if (struct.partitions != null) {
 +        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
 +        {
 +          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
-           for (Partition _iter963 : struct.partitions)
++          for (Partition _iter955 : struct.partitions)
 +          {
-             _iter963.write(oprot);
++            _iter955.write(oprot);
 +          }
 +          oprot.writeListEnd();
 +        }
 +        oprot.writeFieldEnd();
 +      }
 +      if (struct.environmentContext != null) {
 +        if (struct.isSetEnvironmentContext()) {
 +          oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC);
 +          struct.environmentContext.write(oprot);
 +          oprot.writeFieldEnd();
 +        }
 +      }
 +      if (struct.isSetTxnId()) {
 +        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
 +        oprot.writeI64(struct.txnId);
 +        oprot.writeFieldEnd();
 +      }
 +      if (struct.isSetWriteId()) {
 +        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
 +        oprot.writeI64(struct.writeId);
 +        oprot.writeFieldEnd();
 +      }
 +      if (struct.validWriteIdList != null) {
 +        if (struct.isSetValidWriteIdList()) {
 +          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
 +          oprot.writeString(struct.validWriteIdList);
 +          oprot.writeFieldEnd();
 +        }
 +      }
 +      oprot.writeFieldStop();
 +      oprot.writeStructEnd();
 +    }
 +
 +  }
 +
 +  private static class AlterPartitionsRequestTupleSchemeFactory implements SchemeFactory {
 +    public AlterPartitionsRequestTupleScheme getScheme() {
 +      return new AlterPartitionsRequestTupleScheme();
 +    }
 +  }
 +
 +  private static class AlterPartitionsRequestTupleScheme extends TupleScheme<AlterPartitionsRequest> {
 +
 +    @Override
 +    public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
 +      TTupleProtocol oprot = (TTupleProtocol) prot;
 +      oprot.writeString(struct.dbName);
 +      oprot.writeString(struct.tableName);
 +      {
 +        oprot.writeI32(struct.partitions.size());
-         for (Partition _iter964 : struct.partitions)
++        for (Partition _iter956 : struct.partitions)
 +        {
-           _iter964.write(oprot);
++          _iter956.write(oprot);
 +        }
 +      }
 +      BitSet optionals = new BitSet();
 +      if (struct.isSetCatName()) {
 +        optionals.set(0);
 +      }
 +      if (struct.isSetEnvironmentContext()) {
 +        optionals.set(1);
 +      }
 +      if (struct.isSetTxnId()) {
 +        optionals.set(2);
 +      }
 +      if (struct.isSetWriteId()) {
 +        optionals.set(3);
 +      }
 +      if (struct.isSetValidWriteIdList()) {
 +        optionals.set(4);
 +      }
 +      oprot.writeBitSet(optionals, 5);
 +      if (struct.isSetCatName()) {
 +        oprot.writeString(struct.catName);
 +      }
 +      if (struct.isSetEnvironmentContext()) {
 +        struct.environmentContext.write(oprot);
 +      }
 +      if (struct.isSetTxnId()) {
 +        oprot.writeI64(struct.txnId);
 +      }
 +      if (struct.isSetWriteId()) {
 +        oprot.writeI64(struct.writeId);
 +      }
 +      if (struct.isSetValidWriteIdList()) {
 +        oprot.writeString(struct.validWriteIdList);
 +      }
 +    }
 +
 +    @Override
 +    public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
 +      TTupleProtocol iprot = (TTupleProtocol) prot;
 +      struct.dbName = iprot.readString();
 +      struct.setDbNameIsSet(true);
 +      struct.tableName = iprot.readString();
 +      struct.setTableNameIsSet(true);
 +      {
-         org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.partitions = new ArrayList<Partition>(_list965.size);
-         Partition _elem966;
-         for (int _i967 = 0; _i967 < _list965.size; ++_i967)
++        org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++        struct.partitions = new ArrayList<Partition>(_list957.size);
++        Partition _elem958;
++        for (int _i959 = 0; _i959 < _list957.size; ++_i959)
 +        {
-           _elem966 = new Partition();
-           _elem966.read(iprot);
-           struct.partitions.add(_elem966);
++          _elem958 = new Partition();
++          _elem958.read(iprot);
++          struct.partitions.add(_elem958);
 +        }
 +      }
 +      struct.setPartitionsIsSet(true);
 +      BitSet incoming = iprot.readBitSet(5);
 +      if (incoming.get(0)) {
 +        struct.catName = iprot.readString();
 +        struct.setCatNameIsSet(true);
 +      }
 +      if (incoming.get(1)) {
 +        struct.environmentContext = new EnvironmentContext();
 +        struct.environmentContext.read(iprot);
 +        struct.setEnvironmentContextIsSet(true);
 +      }
 +      if (incoming.get(2)) {
 +        struct.txnId = iprot.readI64();
 +        struct.setTxnIdIsSet(true);
 +      }
 +      if (incoming.get(3)) {
 +        struct.writeId = iprot.readI64();
 +        struct.setWriteIdIsSet(true);
 +      }
 +      if (incoming.get(4)) {
 +        struct.validWriteIdList = iprot.readString();
 +        struct.setValidWriteIdListIsSet(true);
 +      }
 +    }
 +  }
 +
 +}
 +
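
For reviewers who only skim the generated bean above, here is a minimal usage sketch of the AlterPartitionsRequest struct as emitted in this diff. It is illustrative only and not part of the commit; the database, table, write-id values, and the empty Partition placeholder are hypothetical, and it only exercises the setters and validate() shown in the generated code above.

    import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class AlterPartitionsRequestExample {
      public static void main(String[] args) throws Exception {
        // Required fields per validate(): dbName, tableName, partitions.
        AlterPartitionsRequest req = new AlterPartitionsRequest();
        req.setDbName("default");                  // hypothetical database
        req.setTableName("sample_tbl");            // hypothetical table
        req.addToPartitions(new Partition());      // placeholder partition object
        // Optional transactional fields carried by the request.
        req.setTxnId(42L);
        req.setWriteId(7L);
        req.setValidWriteIdList("default.sample_tbl:7:9223372036854775807::");
        req.validate();                            // throws TException if a required field is unset
        System.out.println(req);                   // uses the generated toString()
      }
    }

The sketch compiles against the standalone-metastore API jar plus libthrift; how the request is then passed to a metastore client call is outside the scope of this generated class.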

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
index f2f8fb4,79d9fc6..8f5b4e5
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
@@@ -350,14 -350,14 +350,14 @@@ import org.slf4j.LoggerFactory
            case 1: // SCHEMA_VERSIONS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
-                 struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list952.size);
-                 SchemaVersionDescriptor _elem953;
-                 for (int _i954 = 0; _i954 < _list952.size; ++_i954)
 -                org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
 -                struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list936.size);
 -                SchemaVersionDescriptor _elem937;
 -                for (int _i938 = 0; _i938 < _list936.size; ++_i938)
++                org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
++                struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list944.size);
++                SchemaVersionDescriptor _elem945;
++                for (int _i946 = 0; _i946 < _list944.size; ++_i946)
                  {
-                   _elem953 = new SchemaVersionDescriptor();
-                   _elem953.read(iprot);
-                   struct.schemaVersions.add(_elem953);
 -                  _elem937 = new SchemaVersionDescriptor();
 -                  _elem937.read(iprot);
 -                  struct.schemaVersions.add(_elem937);
++                  _elem945 = new SchemaVersionDescriptor();
++                  _elem945.read(iprot);
++                  struct.schemaVersions.add(_elem945);
                  }
                  iprot.readListEnd();
                }
@@@ -383,9 -383,9 +383,9 @@@
          oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC);
          {
            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size()));
-           for (SchemaVersionDescriptor _iter955 : struct.schemaVersions)
 -          for (SchemaVersionDescriptor _iter939 : struct.schemaVersions)
++          for (SchemaVersionDescriptor _iter947 : struct.schemaVersions)
            {
-             _iter955.write(oprot);
 -            _iter939.write(oprot);
++            _iter947.write(oprot);
            }
            oprot.writeListEnd();
          }
@@@ -416,9 -416,9 +416,9 @@@
        if (struct.isSetSchemaVersions()) {
          {
            oprot.writeI32(struct.schemaVersions.size());
-           for (SchemaVersionDescriptor _iter956 : struct.schemaVersions)
 -          for (SchemaVersionDescriptor _iter940 : struct.schemaVersions)
++          for (SchemaVersionDescriptor _iter948 : struct.schemaVersions)
            {
-             _iter956.write(oprot);
 -            _iter940.write(oprot);
++            _iter948.write(oprot);
            }
          }
        }
@@@ -430,14 -430,14 +430,14 @@@
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
-           org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list957.size);
-           SchemaVersionDescriptor _elem958;
-           for (int _i959 = 0; _i959 < _list957.size; ++_i959)
 -          org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list941.size);
 -          SchemaVersionDescriptor _elem942;
 -          for (int _i943 = 0; _i943 < _list941.size; ++_i943)
++          org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list949.size);
++          SchemaVersionDescriptor _elem950;
++          for (int _i951 = 0; _i951 < _list949.size; ++_i951)
            {
-             _elem958 = new SchemaVersionDescriptor();
-             _elem958.read(iprot);
-             struct.schemaVersions.add(_elem958);
 -            _elem942 = new SchemaVersionDescriptor();
 -            _elem942.read(iprot);
 -            struct.schemaVersions.add(_elem942);
++            _elem950 = new SchemaVersionDescriptor();
++            _elem950.read(iprot);
++            struct.schemaVersions.add(_elem950);
            }
          }
          struct.setSchemaVersionsIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
index dd16932,935af04..88d7e3f
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SchemaVersion.java
@@@ -1119,14 -1119,14 +1119,14 @@@ import org.slf4j.LoggerFactory
            case 4: // COLS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
-                 struct.cols = new ArrayList<FieldSchema>(_list944.size);
-                 FieldSchema _elem945;
-                 for (int _i946 = 0; _i946 < _list944.size; ++_i946)
 -                org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
 -                struct.cols = new ArrayList<FieldSchema>(_list928.size);
 -                FieldSchema _elem929;
 -                for (int _i930 = 0; _i930 < _list928.size; ++_i930)
++                org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
++                struct.cols = new ArrayList<FieldSchema>(_list936.size);
++                FieldSchema _elem937;
++                for (int _i938 = 0; _i938 < _list936.size; ++_i938)
                  {
-                   _elem945 = new FieldSchema();
-                   _elem945.read(iprot);
-                   struct.cols.add(_elem945);
 -                  _elem929 = new FieldSchema();
 -                  _elem929.read(iprot);
 -                  struct.cols.add(_elem929);
++                  _elem937 = new FieldSchema();
++                  _elem937.read(iprot);
++                  struct.cols.add(_elem937);
                  }
                  iprot.readListEnd();
                }
@@@ -1212,9 -1212,9 +1212,9 @@@
          oprot.writeFieldBegin(COLS_FIELD_DESC);
          {
            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size()));
-           for (FieldSchema _iter947 : struct.cols)
 -          for (FieldSchema _iter931 : struct.cols)
++          for (FieldSchema _iter939 : struct.cols)
            {
-             _iter947.write(oprot);
 -            _iter931.write(oprot);
++            _iter939.write(oprot);
            }
            oprot.writeListEnd();
          }
@@@ -1323,9 -1323,9 +1323,9 @@@
        if (struct.isSetCols()) {
          {
            oprot.writeI32(struct.cols.size());
-           for (FieldSchema _iter948 : struct.cols)
 -          for (FieldSchema _iter932 : struct.cols)
++          for (FieldSchema _iter940 : struct.cols)
            {
-             _iter948.write(oprot);
 -            _iter932.write(oprot);
++            _iter940.write(oprot);
            }
          }
        }
@@@ -1368,14 -1368,14 +1368,14 @@@
        }
        if (incoming.get(3)) {
          {
-           org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.cols = new ArrayList<FieldSchema>(_list949.size);
-           FieldSchema _elem950;
-           for (int _i951 = 0; _i951 < _list949.size; ++_i951)
 -          org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.cols = new ArrayList<FieldSchema>(_list933.size);
 -          FieldSchema _elem934;
 -          for (int _i935 = 0; _i935 < _list933.size; ++_i935)
++          org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.cols = new ArrayList<FieldSchema>(_list941.size);
++          FieldSchema _elem942;
++          for (int _i943 = 0; _i943 < _list941.size; ++_i943)
            {
-             _elem950 = new FieldSchema();
-             _elem950.read(iprot);
-             struct.cols.add(_elem950);
 -            _elem934 = new FieldSchema();
 -            _elem934.read(iprot);
 -            struct.cols.add(_elem934);
++            _elem942 = new FieldSchema();
++            _elem942.read(iprot);
++            struct.cols.add(_elem942);
            }
          }
          struct.setColsIsSet(true);


[19/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index a5bcc10..2ae6d9a 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -347,11 +347,11 @@ class Iface(fb303.FacebookService.Iface):
     """
     pass
 
-  def get_materialization_invalidation_info(self, dbname, tbl_names):
+  def get_materialization_invalidation_info(self, creation_metadata, validTxnList):
     """
     Parameters:
-     - dbname
-     - tbl_names
+     - creation_metadata
+     - validTxnList
     """
     pass
 
@@ -3141,20 +3141,20 @@ class Client(fb303.FacebookService.Client, Iface):
       raise result.o3
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result")
 
-  def get_materialization_invalidation_info(self, dbname, tbl_names):
+  def get_materialization_invalidation_info(self, creation_metadata, validTxnList):
     """
     Parameters:
-     - dbname
-     - tbl_names
+     - creation_metadata
+     - validTxnList
     """
-    self.send_get_materialization_invalidation_info(dbname, tbl_names)
+    self.send_get_materialization_invalidation_info(creation_metadata, validTxnList)
     return self.recv_get_materialization_invalidation_info()
 
-  def send_get_materialization_invalidation_info(self, dbname, tbl_names):
+  def send_get_materialization_invalidation_info(self, creation_metadata, validTxnList):
     self._oprot.writeMessageBegin('get_materialization_invalidation_info', TMessageType.CALL, self._seqid)
     args = get_materialization_invalidation_info_args()
-    args.dbname = dbname
-    args.tbl_names = tbl_names
+    args.creation_metadata = creation_metadata
+    args.validTxnList = validTxnList
     args.write(self._oprot)
     self._oprot.writeMessageEnd()
     self._oprot.trans.flush()
@@ -10357,7 +10357,7 @@ class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
     iprot.readMessageEnd()
     result = get_materialization_invalidation_info_result()
     try:
-      result.success = self._handler.get_materialization_invalidation_info(args.dbname, args.tbl_names)
+      result.success = self._handler.get_materialization_invalidation_info(args.creation_metadata, args.validTxnList)
       msg_type = TMessageType.REPLY
     except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
       raise
@@ -16045,10 +16045,10 @@ class get_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype840, _size837) = iprot.readListBegin()
-          for _i841 in xrange(_size837):
-            _elem842 = iprot.readString()
-            self.success.append(_elem842)
+          (_etype833, _size830) = iprot.readListBegin()
+          for _i834 in xrange(_size830):
+            _elem835 = iprot.readString()
+            self.success.append(_elem835)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16071,8 +16071,8 @@ class get_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter843 in self.success:
-        oprot.writeString(iter843)
+      for iter836 in self.success:
+        oprot.writeString(iter836)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -16177,10 +16177,10 @@ class get_all_databases_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype847, _size844) = iprot.readListBegin()
-          for _i848 in xrange(_size844):
-            _elem849 = iprot.readString()
-            self.success.append(_elem849)
+          (_etype840, _size837) = iprot.readListBegin()
+          for _i841 in xrange(_size837):
+            _elem842 = iprot.readString()
+            self.success.append(_elem842)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -16203,8 +16203,8 @@ class get_all_databases_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter850 in self.success:
-        oprot.writeString(iter850)
+      for iter843 in self.success:
+        oprot.writeString(iter843)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -16974,12 +16974,12 @@ class get_type_all_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype852, _vtype853, _size851 ) = iprot.readMapBegin()
-          for _i855 in xrange(_size851):
-            _key856 = iprot.readString()
-            _val857 = Type()
-            _val857.read(iprot)
-            self.success[_key856] = _val857
+          (_ktype845, _vtype846, _size844 ) = iprot.readMapBegin()
+          for _i848 in xrange(_size844):
+            _key849 = iprot.readString()
+            _val850 = Type()
+            _val850.read(iprot)
+            self.success[_key849] = _val850
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -17002,9 +17002,9 @@ class get_type_all_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-      for kiter858,viter859 in self.success.items():
-        oprot.writeString(kiter858)
-        viter859.write(oprot)
+      for kiter851,viter852 in self.success.items():
+        oprot.writeString(kiter851)
+        viter852.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o2 is not None:
@@ -17147,11 +17147,11 @@ class get_fields_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype863, _size860) = iprot.readListBegin()
-          for _i864 in xrange(_size860):
-            _elem865 = FieldSchema()
-            _elem865.read(iprot)
-            self.success.append(_elem865)
+          (_etype856, _size853) = iprot.readListBegin()
+          for _i857 in xrange(_size853):
+            _elem858 = FieldSchema()
+            _elem858.read(iprot)
+            self.success.append(_elem858)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17186,8 +17186,8 @@ class get_fields_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter866 in self.success:
-        iter866.write(oprot)
+      for iter859 in self.success:
+        iter859.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17354,11 +17354,11 @@ class get_fields_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype870, _size867) = iprot.readListBegin()
-          for _i871 in xrange(_size867):
-            _elem872 = FieldSchema()
-            _elem872.read(iprot)
-            self.success.append(_elem872)
+          (_etype863, _size860) = iprot.readListBegin()
+          for _i864 in xrange(_size860):
+            _elem865 = FieldSchema()
+            _elem865.read(iprot)
+            self.success.append(_elem865)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17393,8 +17393,8 @@ class get_fields_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter873 in self.success:
-        iter873.write(oprot)
+      for iter866 in self.success:
+        iter866.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17547,11 +17547,11 @@ class get_schema_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype877, _size874) = iprot.readListBegin()
-          for _i878 in xrange(_size874):
-            _elem879 = FieldSchema()
-            _elem879.read(iprot)
-            self.success.append(_elem879)
+          (_etype870, _size867) = iprot.readListBegin()
+          for _i871 in xrange(_size867):
+            _elem872 = FieldSchema()
+            _elem872.read(iprot)
+            self.success.append(_elem872)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17586,8 +17586,8 @@ class get_schema_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter880 in self.success:
-        iter880.write(oprot)
+      for iter873 in self.success:
+        iter873.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -17754,11 +17754,11 @@ class get_schema_with_environment_context_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype884, _size881) = iprot.readListBegin()
-          for _i885 in xrange(_size881):
-            _elem886 = FieldSchema()
-            _elem886.read(iprot)
-            self.success.append(_elem886)
+          (_etype877, _size874) = iprot.readListBegin()
+          for _i878 in xrange(_size874):
+            _elem879 = FieldSchema()
+            _elem879.read(iprot)
+            self.success.append(_elem879)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -17793,8 +17793,8 @@ class get_schema_with_environment_context_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter887 in self.success:
-        iter887.write(oprot)
+      for iter880 in self.success:
+        iter880.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -18247,66 +18247,66 @@ class create_table_with_constraints_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.primaryKeys = []
-          (_etype891, _size888) = iprot.readListBegin()
-          for _i892 in xrange(_size888):
-            _elem893 = SQLPrimaryKey()
-            _elem893.read(iprot)
-            self.primaryKeys.append(_elem893)
+          (_etype884, _size881) = iprot.readListBegin()
+          for _i885 in xrange(_size881):
+            _elem886 = SQLPrimaryKey()
+            _elem886.read(iprot)
+            self.primaryKeys.append(_elem886)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.foreignKeys = []
-          (_etype897, _size894) = iprot.readListBegin()
-          for _i898 in xrange(_size894):
-            _elem899 = SQLForeignKey()
-            _elem899.read(iprot)
-            self.foreignKeys.append(_elem899)
+          (_etype890, _size887) = iprot.readListBegin()
+          for _i891 in xrange(_size887):
+            _elem892 = SQLForeignKey()
+            _elem892.read(iprot)
+            self.foreignKeys.append(_elem892)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.uniqueConstraints = []
-          (_etype903, _size900) = iprot.readListBegin()
-          for _i904 in xrange(_size900):
-            _elem905 = SQLUniqueConstraint()
-            _elem905.read(iprot)
-            self.uniqueConstraints.append(_elem905)
+          (_etype896, _size893) = iprot.readListBegin()
+          for _i897 in xrange(_size893):
+            _elem898 = SQLUniqueConstraint()
+            _elem898.read(iprot)
+            self.uniqueConstraints.append(_elem898)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.notNullConstraints = []
-          (_etype909, _size906) = iprot.readListBegin()
-          for _i910 in xrange(_size906):
-            _elem911 = SQLNotNullConstraint()
-            _elem911.read(iprot)
-            self.notNullConstraints.append(_elem911)
+          (_etype902, _size899) = iprot.readListBegin()
+          for _i903 in xrange(_size899):
+            _elem904 = SQLNotNullConstraint()
+            _elem904.read(iprot)
+            self.notNullConstraints.append(_elem904)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.LIST:
           self.defaultConstraints = []
-          (_etype915, _size912) = iprot.readListBegin()
-          for _i916 in xrange(_size912):
-            _elem917 = SQLDefaultConstraint()
-            _elem917.read(iprot)
-            self.defaultConstraints.append(_elem917)
+          (_etype908, _size905) = iprot.readListBegin()
+          for _i909 in xrange(_size905):
+            _elem910 = SQLDefaultConstraint()
+            _elem910.read(iprot)
+            self.defaultConstraints.append(_elem910)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 7:
         if ftype == TType.LIST:
           self.checkConstraints = []
-          (_etype921, _size918) = iprot.readListBegin()
-          for _i922 in xrange(_size918):
-            _elem923 = SQLCheckConstraint()
-            _elem923.read(iprot)
-            self.checkConstraints.append(_elem923)
+          (_etype914, _size911) = iprot.readListBegin()
+          for _i915 in xrange(_size911):
+            _elem916 = SQLCheckConstraint()
+            _elem916.read(iprot)
+            self.checkConstraints.append(_elem916)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18327,43 +18327,43 @@ class create_table_with_constraints_args:
     if self.primaryKeys is not None:
       oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-      for iter924 in self.primaryKeys:
-        iter924.write(oprot)
+      for iter917 in self.primaryKeys:
+        iter917.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.foreignKeys is not None:
       oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-      for iter925 in self.foreignKeys:
-        iter925.write(oprot)
+      for iter918 in self.foreignKeys:
+        iter918.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.uniqueConstraints is not None:
       oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
-      for iter926 in self.uniqueConstraints:
-        iter926.write(oprot)
+      for iter919 in self.uniqueConstraints:
+        iter919.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.notNullConstraints is not None:
       oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
       oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
-      for iter927 in self.notNullConstraints:
-        iter927.write(oprot)
+      for iter920 in self.notNullConstraints:
+        iter920.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.defaultConstraints is not None:
       oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6)
       oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
-      for iter928 in self.defaultConstraints:
-        iter928.write(oprot)
+      for iter921 in self.defaultConstraints:
+        iter921.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.checkConstraints is not None:
       oprot.writeFieldBegin('checkConstraints', TType.LIST, 7)
       oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints))
-      for iter929 in self.checkConstraints:
-        iter929.write(oprot)
+      for iter922 in self.checkConstraints:
+        iter922.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19923,10 +19923,10 @@ class truncate_table_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.partNames = []
-          (_etype933, _size930) = iprot.readListBegin()
-          for _i934 in xrange(_size930):
-            _elem935 = iprot.readString()
-            self.partNames.append(_elem935)
+          (_etype926, _size923) = iprot.readListBegin()
+          for _i927 in xrange(_size923):
+            _elem928 = iprot.readString()
+            self.partNames.append(_elem928)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19951,8 +19951,8 @@ class truncate_table_args:
     if self.partNames is not None:
       oprot.writeFieldBegin('partNames', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.partNames))
-      for iter936 in self.partNames:
-        oprot.writeString(iter936)
+      for iter929 in self.partNames:
+        oprot.writeString(iter929)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20152,10 +20152,10 @@ class get_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype940, _size937) = iprot.readListBegin()
-          for _i941 in xrange(_size937):
-            _elem942 = iprot.readString()
-            self.success.append(_elem942)
+          (_etype933, _size930) = iprot.readListBegin()
+          for _i934 in xrange(_size930):
+            _elem935 = iprot.readString()
+            self.success.append(_elem935)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20178,8 +20178,8 @@ class get_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter943 in self.success:
-        oprot.writeString(iter943)
+      for iter936 in self.success:
+        oprot.writeString(iter936)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20329,10 +20329,10 @@ class get_tables_by_type_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype947, _size944) = iprot.readListBegin()
-          for _i948 in xrange(_size944):
-            _elem949 = iprot.readString()
-            self.success.append(_elem949)
+          (_etype940, _size937) = iprot.readListBegin()
+          for _i941 in xrange(_size937):
+            _elem942 = iprot.readString()
+            self.success.append(_elem942)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20355,8 +20355,8 @@ class get_tables_by_type_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter950 in self.success:
-        oprot.writeString(iter950)
+      for iter943 in self.success:
+        oprot.writeString(iter943)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20480,10 +20480,10 @@ class get_materialized_views_for_rewriting_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype954, _size951) = iprot.readListBegin()
-          for _i955 in xrange(_size951):
-            _elem956 = iprot.readString()
-            self.success.append(_elem956)
+          (_etype947, _size944) = iprot.readListBegin()
+          for _i948 in xrange(_size944):
+            _elem949 = iprot.readString()
+            self.success.append(_elem949)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20506,8 +20506,8 @@ class get_materialized_views_for_rewriting_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter957 in self.success:
-        oprot.writeString(iter957)
+      for iter950 in self.success:
+        oprot.writeString(iter950)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20580,10 +20580,10 @@ class get_table_meta_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.tbl_types = []
-          (_etype961, _size958) = iprot.readListBegin()
-          for _i962 in xrange(_size958):
-            _elem963 = iprot.readString()
-            self.tbl_types.append(_elem963)
+          (_etype954, _size951) = iprot.readListBegin()
+          for _i955 in xrange(_size951):
+            _elem956 = iprot.readString()
+            self.tbl_types.append(_elem956)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20608,8 +20608,8 @@ class get_table_meta_args:
     if self.tbl_types is not None:
       oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-      for iter964 in self.tbl_types:
-        oprot.writeString(iter964)
+      for iter957 in self.tbl_types:
+        oprot.writeString(iter957)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20665,11 +20665,11 @@ class get_table_meta_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype968, _size965) = iprot.readListBegin()
-          for _i969 in xrange(_size965):
-            _elem970 = TableMeta()
-            _elem970.read(iprot)
-            self.success.append(_elem970)
+          (_etype961, _size958) = iprot.readListBegin()
+          for _i962 in xrange(_size958):
+            _elem963 = TableMeta()
+            _elem963.read(iprot)
+            self.success.append(_elem963)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20692,8 +20692,8 @@ class get_table_meta_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter971 in self.success:
-        iter971.write(oprot)
+      for iter964 in self.success:
+        iter964.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -20817,10 +20817,10 @@ class get_all_tables_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype975, _size972) = iprot.readListBegin()
-          for _i976 in xrange(_size972):
-            _elem977 = iprot.readString()
-            self.success.append(_elem977)
+          (_etype968, _size965) = iprot.readListBegin()
+          for _i969 in xrange(_size965):
+            _elem970 = iprot.readString()
+            self.success.append(_elem970)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20843,8 +20843,8 @@ class get_all_tables_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter978 in self.success:
-        oprot.writeString(iter978)
+      for iter971 in self.success:
+        oprot.writeString(iter971)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -21080,10 +21080,10 @@ class get_table_objects_by_name_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tbl_names = []
-          (_etype982, _size979) = iprot.readListBegin()
-          for _i983 in xrange(_size979):
-            _elem984 = iprot.readString()
-            self.tbl_names.append(_elem984)
+          (_etype975, _size972) = iprot.readListBegin()
+          for _i976 in xrange(_size972):
+            _elem977 = iprot.readString()
+            self.tbl_names.append(_elem977)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21104,8 +21104,8 @@ class get_table_objects_by_name_args:
     if self.tbl_names is not None:
       oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter985 in self.tbl_names:
-        oprot.writeString(iter985)
+      for iter978 in self.tbl_names:
+        oprot.writeString(iter978)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21157,11 +21157,11 @@ class get_table_objects_by_name_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype989, _size986) = iprot.readListBegin()
-          for _i990 in xrange(_size986):
-            _elem991 = Table()
-            _elem991.read(iprot)
-            self.success.append(_elem991)
+          (_etype982, _size979) = iprot.readListBegin()
+          for _i983 in xrange(_size979):
+            _elem984 = Table()
+            _elem984.read(iprot)
+            self.success.append(_elem984)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21178,8 +21178,8 @@ class get_table_objects_by_name_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter992 in self.success:
-        iter992.write(oprot)
+      for iter985 in self.success:
+        iter985.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21540,19 +21540,19 @@ class get_table_objects_by_name_req_result:
 class get_materialization_invalidation_info_args:
   """
   Attributes:
-   - dbname
-   - tbl_names
+   - creation_metadata
+   - validTxnList
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.STRING, 'dbname', None, None, ), # 1
-    (2, TType.LIST, 'tbl_names', (TType.STRING,None), None, ), # 2
+    (1, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 1
+    (2, TType.STRING, 'validTxnList', None, None, ), # 2
   )
 
-  def __init__(self, dbname=None, tbl_names=None,):
-    self.dbname = dbname
-    self.tbl_names = tbl_names
+  def __init__(self, creation_metadata=None, validTxnList=None,):
+    self.creation_metadata = creation_metadata
+    self.validTxnList = validTxnList
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -21564,18 +21564,14 @@ class get_materialization_invalidation_info_args:
       if ftype == TType.STOP:
         break
       if fid == 1:
-        if ftype == TType.STRING:
-          self.dbname = iprot.readString()
+        if ftype == TType.STRUCT:
+          self.creation_metadata = CreationMetadata()
+          self.creation_metadata.read(iprot)
         else:
           iprot.skip(ftype)
       elif fid == 2:
-        if ftype == TType.LIST:
-          self.tbl_names = []
-          (_etype996, _size993) = iprot.readListBegin()
-          for _i997 in xrange(_size993):
-            _elem998 = iprot.readString()
-            self.tbl_names.append(_elem998)
-          iprot.readListEnd()
+        if ftype == TType.STRING:
+          self.validTxnList = iprot.readString()
         else:
           iprot.skip(ftype)
       else:
@@ -21588,16 +21584,13 @@ class get_materialization_invalidation_info_args:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
     oprot.writeStructBegin('get_materialization_invalidation_info_args')
-    if self.dbname is not None:
-      oprot.writeFieldBegin('dbname', TType.STRING, 1)
-      oprot.writeString(self.dbname)
+    if self.creation_metadata is not None:
+      oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 1)
+      self.creation_metadata.write(oprot)
       oprot.writeFieldEnd()
-    if self.tbl_names is not None:
-      oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
-      oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-      for iter999 in self.tbl_names:
-        oprot.writeString(iter999)
-      oprot.writeListEnd()
+    if self.validTxnList is not None:
+      oprot.writeFieldBegin('validTxnList', TType.STRING, 2)
+      oprot.writeString(self.validTxnList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
@@ -21608,8 +21601,8 @@ class get_materialization_invalidation_info_args:
 
   def __hash__(self):
     value = 17
-    value = (value * 31) ^ hash(self.dbname)
-    value = (value * 31) ^ hash(self.tbl_names)
+    value = (value * 31) ^ hash(self.creation_metadata)
+    value = (value * 31) ^ hash(self.validTxnList)
     return value
 
   def __repr__(self):
@@ -21633,7 +21626,7 @@ class get_materialization_invalidation_info_result:
   """
 
   thrift_spec = (
-    (0, TType.MAP, 'success', (TType.STRING,None,TType.STRUCT,(Materialization, Materialization.thrift_spec)), None, ), # 0
+    (0, TType.STRUCT, 'success', (Materialization, Materialization.thrift_spec), None, ), # 0
     (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
     (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2
     (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3
@@ -21655,15 +21648,9 @@ class get_materialization_invalidation_info_result:
       if ftype == TType.STOP:
         break
       if fid == 0:
-        if ftype == TType.MAP:
-          self.success = {}
-          (_ktype1001, _vtype1002, _size1000 ) = iprot.readMapBegin()
-          for _i1004 in xrange(_size1000):
-            _key1005 = iprot.readString()
-            _val1006 = Materialization()
-            _val1006.read(iprot)
-            self.success[_key1005] = _val1006
-          iprot.readMapEnd()
+        if ftype == TType.STRUCT:
+          self.success = Materialization()
+          self.success.read(iprot)
         else:
           iprot.skip(ftype)
       elif fid == 1:
@@ -21695,12 +21682,8 @@ class get_materialization_invalidation_info_result:
       return
     oprot.writeStructBegin('get_materialization_invalidation_info_result')
     if self.success is not None:
-      oprot.writeFieldBegin('success', TType.MAP, 0)
-      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-      for kiter1007,viter1008 in self.success.items():
-        oprot.writeString(kiter1007)
-        viter1008.write(oprot)
-      oprot.writeMapEnd()
+      oprot.writeFieldBegin('success', TType.STRUCT, 0)
+      self.success.write(oprot)
       oprot.writeFieldEnd()
     if self.o1 is not None:
       oprot.writeFieldBegin('o1', TType.STRUCT, 1)
@@ -22064,10 +22047,10 @@ class get_table_names_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1012, _size1009) = iprot.readListBegin()
-          for _i1013 in xrange(_size1009):
-            _elem1014 = iprot.readString()
-            self.success.append(_elem1014)
+          (_etype989, _size986) = iprot.readListBegin()
+          for _i990 in xrange(_size986):
+            _elem991 = iprot.readString()
+            self.success.append(_elem991)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -22102,8 +22085,8 @@ class get_table_names_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1015 in self.success:
-        oprot.writeString(iter1015)
+      for iter992 in self.success:
+        oprot.writeString(iter992)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -23073,11 +23056,11 @@ class add_partitions_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1019, _size1016) = iprot.readListBegin()
-          for _i1020 in xrange(_size1016):
-            _elem1021 = Partition()
-            _elem1021.read(iprot)
-            self.new_parts.append(_elem1021)
+          (_etype996, _size993) = iprot.readListBegin()
+          for _i997 in xrange(_size993):
+            _elem998 = Partition()
+            _elem998.read(iprot)
+            self.new_parts.append(_elem998)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23094,8 +23077,8 @@ class add_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1022 in self.new_parts:
-        iter1022.write(oprot)
+      for iter999 in self.new_parts:
+        iter999.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23253,11 +23236,11 @@ class add_partitions_pspec_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1026, _size1023) = iprot.readListBegin()
-          for _i1027 in xrange(_size1023):
-            _elem1028 = PartitionSpec()
-            _elem1028.read(iprot)
-            self.new_parts.append(_elem1028)
+          (_etype1003, _size1000) = iprot.readListBegin()
+          for _i1004 in xrange(_size1000):
+            _elem1005 = PartitionSpec()
+            _elem1005.read(iprot)
+            self.new_parts.append(_elem1005)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23274,8 +23257,8 @@ class add_partitions_pspec_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1029 in self.new_parts:
-        iter1029.write(oprot)
+      for iter1006 in self.new_parts:
+        iter1006.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23449,10 +23432,10 @@ class append_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1033, _size1030) = iprot.readListBegin()
-          for _i1034 in xrange(_size1030):
-            _elem1035 = iprot.readString()
-            self.part_vals.append(_elem1035)
+          (_etype1010, _size1007) = iprot.readListBegin()
+          for _i1011 in xrange(_size1007):
+            _elem1012 = iprot.readString()
+            self.part_vals.append(_elem1012)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23477,8 +23460,8 @@ class append_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1036 in self.part_vals:
-        oprot.writeString(iter1036)
+      for iter1013 in self.part_vals:
+        oprot.writeString(iter1013)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23831,10 +23814,10 @@ class append_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1040, _size1037) = iprot.readListBegin()
-          for _i1041 in xrange(_size1037):
-            _elem1042 = iprot.readString()
-            self.part_vals.append(_elem1042)
+          (_etype1017, _size1014) = iprot.readListBegin()
+          for _i1018 in xrange(_size1014):
+            _elem1019 = iprot.readString()
+            self.part_vals.append(_elem1019)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -23865,8 +23848,8 @@ class append_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1043 in self.part_vals:
-        oprot.writeString(iter1043)
+      for iter1020 in self.part_vals:
+        oprot.writeString(iter1020)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -24461,10 +24444,10 @@ class drop_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1047, _size1044) = iprot.readListBegin()
-          for _i1048 in xrange(_size1044):
-            _elem1049 = iprot.readString()
-            self.part_vals.append(_elem1049)
+          (_etype1024, _size1021) = iprot.readListBegin()
+          for _i1025 in xrange(_size1021):
+            _elem1026 = iprot.readString()
+            self.part_vals.append(_elem1026)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24494,8 +24477,8 @@ class drop_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1050 in self.part_vals:
-        oprot.writeString(iter1050)
+      for iter1027 in self.part_vals:
+        oprot.writeString(iter1027)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -24668,10 +24651,10 @@ class drop_partition_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1054, _size1051) = iprot.readListBegin()
-          for _i1055 in xrange(_size1051):
-            _elem1056 = iprot.readString()
-            self.part_vals.append(_elem1056)
+          (_etype1031, _size1028) = iprot.readListBegin()
+          for _i1032 in xrange(_size1028):
+            _elem1033 = iprot.readString()
+            self.part_vals.append(_elem1033)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -24707,8 +24690,8 @@ class drop_partition_with_environment_context_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1057 in self.part_vals:
-        oprot.writeString(iter1057)
+      for iter1034 in self.part_vals:
+        oprot.writeString(iter1034)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.deleteData is not None:
@@ -25445,10 +25428,10 @@ class get_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1061, _size1058) = iprot.readListBegin()
-          for _i1062 in xrange(_size1058):
-            _elem1063 = iprot.readString()
-            self.part_vals.append(_elem1063)
+          (_etype1038, _size1035) = iprot.readListBegin()
+          for _i1039 in xrange(_size1035):
+            _elem1040 = iprot.readString()
+            self.part_vals.append(_elem1040)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -25473,8 +25456,8 @@ class get_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1064 in self.part_vals:
-        oprot.writeString(iter1064)
+      for iter1041 in self.part_vals:
+        oprot.writeString(iter1041)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -25633,11 +25616,11 @@ class exchange_partition_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1066, _vtype1067, _size1065 ) = iprot.readMapBegin()
-          for _i1069 in xrange(_size1065):
-            _key1070 = iprot.readString()
-            _val1071 = iprot.readString()
-            self.partitionSpecs[_key1070] = _val1071
+          (_ktype1043, _vtype1044, _size1042 ) = iprot.readMapBegin()
+          for _i1046 in xrange(_size1042):
+            _key1047 = iprot.readString()
+            _val1048 = iprot.readString()
+            self.partitionSpecs[_key1047] = _val1048
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25674,9 +25657,9 @@ class exchange_partition_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1072,viter1073 in self.partitionSpecs.items():
-        oprot.writeString(kiter1072)
-        oprot.writeString(viter1073)
+      for kiter1049,viter1050 in self.partitionSpecs.items():
+        oprot.writeString(kiter1049)
+        oprot.writeString(viter1050)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -25881,11 +25864,11 @@ class exchange_partitions_args:
       if fid == 1:
         if ftype == TType.MAP:
           self.partitionSpecs = {}
-          (_ktype1075, _vtype1076, _size1074 ) = iprot.readMapBegin()
-          for _i1078 in xrange(_size1074):
-            _key1079 = iprot.readString()
-            _val1080 = iprot.readString()
-            self.partitionSpecs[_key1079] = _val1080
+          (_ktype1052, _vtype1053, _size1051 ) = iprot.readMapBegin()
+          for _i1055 in xrange(_size1051):
+            _key1056 = iprot.readString()
+            _val1057 = iprot.readString()
+            self.partitionSpecs[_key1056] = _val1057
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -25922,9 +25905,9 @@ class exchange_partitions_args:
     if self.partitionSpecs is not None:
       oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-      for kiter1081,viter1082 in self.partitionSpecs.items():
-        oprot.writeString(kiter1081)
-        oprot.writeString(viter1082)
+      for kiter1058,viter1059 in self.partitionSpecs.items():
+        oprot.writeString(kiter1058)
+        oprot.writeString(viter1059)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.source_db is not None:
@@ -26007,11 +25990,11 @@ class exchange_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1086, _size1083) = iprot.readListBegin()
-          for _i1087 in xrange(_size1083):
-            _elem1088 = Partition()
-            _elem1088.read(iprot)
-            self.success.append(_elem1088)
+          (_etype1063, _size1060) = iprot.readListBegin()
+          for _i1064 in xrange(_size1060):
+            _elem1065 = Partition()
+            _elem1065.read(iprot)
+            self.success.append(_elem1065)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26052,8 +26035,8 @@ class exchange_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1089 in self.success:
-        iter1089.write(oprot)
+      for iter1066 in self.success:
+        iter1066.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26147,10 +26130,10 @@ class get_partition_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1093, _size1090) = iprot.readListBegin()
-          for _i1094 in xrange(_size1090):
-            _elem1095 = iprot.readString()
-            self.part_vals.append(_elem1095)
+          (_etype1070, _size1067) = iprot.readListBegin()
+          for _i1071 in xrange(_size1067):
+            _elem1072 = iprot.readString()
+            self.part_vals.append(_elem1072)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26162,10 +26145,10 @@ class get_partition_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1099, _size1096) = iprot.readListBegin()
-          for _i1100 in xrange(_size1096):
-            _elem1101 = iprot.readString()
-            self.group_names.append(_elem1101)
+          (_etype1076, _size1073) = iprot.readListBegin()
+          for _i1077 in xrange(_size1073):
+            _elem1078 = iprot.readString()
+            self.group_names.append(_elem1078)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26190,8 +26173,8 @@ class get_partition_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1102 in self.part_vals:
-        oprot.writeString(iter1102)
+      for iter1079 in self.part_vals:
+        oprot.writeString(iter1079)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.user_name is not None:
@@ -26201,8 +26184,8 @@ class get_partition_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1103 in self.group_names:
-        oprot.writeString(iter1103)
+      for iter1080 in self.group_names:
+        oprot.writeString(iter1080)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -26631,11 +26614,11 @@ class get_partitions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1107, _size1104) = iprot.readListBegin()
-          for _i1108 in xrange(_size1104):
-            _elem1109 = Partition()
-            _elem1109.read(iprot)
-            self.success.append(_elem1109)
+          (_etype1084, _size1081) = iprot.readListBegin()
+          for _i1085 in xrange(_size1081):
+            _elem1086 = Partition()
+            _elem1086.read(iprot)
+            self.success.append(_elem1086)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26664,8 +26647,8 @@ class get_partitions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1110 in self.success:
-        iter1110.write(oprot)
+      for iter1087 in self.success:
+        iter1087.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -26759,10 +26742,10 @@ class get_partitions_with_auth_args:
       elif fid == 5:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1114, _size1111) = iprot.readListBegin()
-          for _i1115 in xrange(_size1111):
-            _elem1116 = iprot.readString()
-            self.group_names.append(_elem1116)
+          (_etype1091, _size1088) = iprot.readListBegin()
+          for _i1092 in xrange(_size1088):
+            _elem1093 = iprot.readString()
+            self.group_names.append(_elem1093)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26795,8 +26778,8 @@ class get_partitions_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1117 in self.group_names:
-        oprot.writeString(iter1117)
+      for iter1094 in self.group_names:
+        oprot.writeString(iter1094)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -26857,11 +26840,11 @@ class get_partitions_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1121, _size1118) = iprot.readListBegin()
-          for _i1122 in xrange(_size1118):
-            _elem1123 = Partition()
-            _elem1123.read(iprot)
-            self.success.append(_elem1123)
+          (_etype1098, _size1095) = iprot.readListBegin()
+          for _i1099 in xrange(_size1095):
+            _elem1100 = Partition()
+            _elem1100.read(iprot)
+            self.success.append(_elem1100)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -26890,8 +26873,8 @@ class get_partitions_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1124 in self.success:
-        iter1124.write(oprot)
+      for iter1101 in self.success:
+        iter1101.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27049,11 +27032,11 @@ class get_partitions_pspec_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1128, _size1125) = iprot.readListBegin()
-          for _i1129 in xrange(_size1125):
-            _elem1130 = PartitionSpec()
-            _elem1130.read(iprot)
-            self.success.append(_elem1130)
+          (_etype1105, _size1102) = iprot.readListBegin()
+          for _i1106 in xrange(_size1102):
+            _elem1107 = PartitionSpec()
+            _elem1107.read(iprot)
+            self.success.append(_elem1107)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27082,8 +27065,8 @@ class get_partitions_pspec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1131 in self.success:
-        iter1131.write(oprot)
+      for iter1108 in self.success:
+        iter1108.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27241,10 +27224,10 @@ class get_partition_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1135, _size1132) = iprot.readListBegin()
-          for _i1136 in xrange(_size1132):
-            _elem1137 = iprot.readString()
-            self.success.append(_elem1137)
+          (_etype1112, _size1109) = iprot.readListBegin()
+          for _i1113 in xrange(_size1109):
+            _elem1114 = iprot.readString()
+            self.success.append(_elem1114)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27273,8 +27256,8 @@ class get_partition_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1138 in self.success:
-        oprot.writeString(iter1138)
+      for iter1115 in self.success:
+        oprot.writeString(iter1115)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27514,10 +27497,10 @@ class get_partitions_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1142, _size1139) = iprot.readListBegin()
-          for _i1143 in xrange(_size1139):
-            _elem1144 = iprot.readString()
-            self.part_vals.append(_elem1144)
+          (_etype1119, _size1116) = iprot.readListBegin()
+          for _i1120 in xrange(_size1116):
+            _elem1121 = iprot.readString()
+            self.part_vals.append(_elem1121)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27547,8 +27530,8 @@ class get_partitions_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1145 in self.part_vals:
-        oprot.writeString(iter1145)
+      for iter1122 in self.part_vals:
+        oprot.writeString(iter1122)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -27612,11 +27595,11 @@ class get_partitions_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1149, _size1146) = iprot.readListBegin()
-          for _i1150 in xrange(_size1146):
-            _elem1151 = Partition()
-            _elem1151.read(iprot)
-            self.success.append(_elem1151)
+          (_etype1126, _size1123) = iprot.readListBegin()
+          for _i1127 in xrange(_size1123):
+            _elem1128 = Partition()
+            _elem1128.read(iprot)
+            self.success.append(_elem1128)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27645,8 +27628,8 @@ class get_partitions_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1152 in self.success:
-        iter1152.write(oprot)
+      for iter1129 in self.success:
+        iter1129.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27733,10 +27716,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1156, _size1153) = iprot.readListBegin()
-          for _i1157 in xrange(_size1153):
-            _elem1158 = iprot.readString()
-            self.part_vals.append(_elem1158)
+          (_etype1133, _size1130) = iprot.readListBegin()
+          for _i1134 in xrange(_size1130):
+            _elem1135 = iprot.readString()
+            self.part_vals.append(_elem1135)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27753,10 +27736,10 @@ class get_partitions_ps_with_auth_args:
       elif fid == 6:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1162, _size1159) = iprot.readListBegin()
-          for _i1163 in xrange(_size1159):
-            _elem1164 = iprot.readString()
-            self.group_names.append(_elem1164)
+          (_etype1139, _size1136) = iprot.readListBegin()
+          for _i1140 in xrange(_size1136):
+            _elem1141 = iprot.readString()
+            self.group_names.append(_elem1141)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27781,8 +27764,8 @@ class get_partitions_ps_with_auth_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1165 in self.part_vals:
-        oprot.writeString(iter1165)
+      for iter1142 in self.part_vals:
+        oprot.writeString(iter1142)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -27796,8 +27779,8 @@ class get_partitions_ps_with_auth_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 6)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1166 in self.group_names:
-        oprot.writeString(iter1166)
+      for iter1143 in self.group_names:
+        oprot.writeString(iter1143)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -27859,11 +27842,11 @@ class get_partitions_ps_with_auth_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1170, _size1167) = iprot.readListBegin()
-          for _i1171 in xrange(_size1167):
-            _elem1172 = Partition()
-            _elem1172.read(iprot)
-            self.success.append(_elem1172)
+          (_etype1147, _size1144) = iprot.readListBegin()
+          for _i1148 in xrange(_size1144):
+            _elem1149 = Partition()
+            _elem1149.read(iprot)
+            self.success.append(_elem1149)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -27892,8 +27875,8 @@ class get_partitions_ps_with_auth_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1173 in self.success:
-        iter1173.write(oprot)
+      for iter1150 in self.success:
+        iter1150.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -27974,10 +27957,10 @@ class get_partition_names_ps_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1177, _size1174) = iprot.readListBegin()
-          for _i1178 in xrange(_size1174):
-            _elem1179 = iprot.readString()
-            self.part_vals.append(_elem1179)
+          (_etype1154, _size1151) = iprot.readListBegin()
+          for _i1155 in xrange(_size1151):
+            _elem1156 = iprot.readString()
+            self.part_vals.append(_elem1156)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28007,8 +27990,8 @@ class get_partition_names_ps_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1180 in self.part_vals:
-        oprot.writeString(iter1180)
+      for iter1157 in self.part_vals:
+        oprot.writeString(iter1157)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.max_parts is not None:
@@ -28072,10 +28055,10 @@ class get_partition_names_ps_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1184, _size1181) = iprot.readListBegin()
-          for _i1185 in xrange(_size1181):
-            _elem1186 = iprot.readString()
-            self.success.append(_elem1186)
+          (_etype1161, _size1158) = iprot.readListBegin()
+          for _i1162 in xrange(_size1158):
+            _elem1163 = iprot.readString()
+            self.success.append(_elem1163)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28104,8 +28087,8 @@ class get_partition_names_ps_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1187 in self.success:
-        oprot.writeString(iter1187)
+      for iter1164 in self.success:
+        oprot.writeString(iter1164)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28276,11 +28259,11 @@ class get_partitions_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1191, _size1188) = iprot.readListBegin()
-          for _i1192 in xrange(_size1188):
-            _elem1193 = Partition()
-            _elem1193.read(iprot)
-            self.success.append(_elem1193)
+          (_etype1168, _size1165) = iprot.readListBegin()
+          for _i1169 in xrange(_size1165):
+            _elem1170 = Partition()
+            _elem1170.read(iprot)
+            self.success.append(_elem1170)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28309,8 +28292,8 @@ class get_partitions_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1194 in self.success:
-        iter1194.write(oprot)
+      for iter1171 in self.success:
+        iter1171.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28481,11 +28464,11 @@ class get_part_specs_by_filter_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1198, _size1195) = iprot.readListBegin()
-          for _i1199 in xrange(_size1195):
-            _elem1200 = PartitionSpec()
-            _elem1200.read(iprot)
-            self.success.append(_elem1200)
+          (_etype1175, _size1172) = iprot.readListBegin()
+          for _i1176 in xrange(_size1172):
+            _elem1177 = PartitionSpec()
+            _elem1177.read(iprot)
+            self.success.append(_elem1177)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28514,8 +28497,8 @@ class get_part_specs_by_filter_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1201 in self.success:
-        iter1201.write(oprot)
+      for iter1178 in self.success:
+        iter1178.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -28935,10 +28918,10 @@ class get_partitions_by_names_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.names = []
-          (_etype1205, _size1202) = iprot.readListBegin()
-          for _i1206 in xrange(_size1202):
-            _elem1207 = iprot.readString()
-            self.names.append(_elem1207)
+          (_etype1182, _size1179) = iprot.readListBegin()
+          for _i1183 in xrange(_size1179):
+            _elem1184 = iprot.readString()
+            self.names.append(_elem1184)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -28963,8 +28946,8 @@ class get_partitions_by_names_args:
     if self.names is not None:
       oprot.writeFieldBegin('names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.names))
-      for iter1208 in self.names:
-        oprot.writeString(iter1208)
+      for iter1185 in self.names:
+        oprot.writeString(iter1185)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29023,11 +29006,11 @@ class get_partitions_by_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1212, _size1209) = iprot.readListBegin()
-          for _i1213 in xrange(_size1209):
-            _elem1214 = Partition()
-            _elem1214.read(iprot)
-            self.success.append(_elem1214)
+          (_etype1189, _size1186) = iprot.readListBegin()
+          for _i1190 in xrange(_size1186):
+            _elem1191 = Partition()
+            _elem1191.read(iprot)
+            self.success.append(_elem1191)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29056,8 +29039,8 @@ class get_partitions_by_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1215 in self.success:
-        iter1215.write(oprot)
+      for iter1192 in self.success:
+        iter1192.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -29307,11 +29290,11 @@ class alter_partitions_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1219, _size1216) = iprot.readListBegin()
-          for _i1220 in xrange(_size1216):
-            _elem1221 = Partition()
-            _elem1221.read(iprot)
-            self.new_parts.append(_elem1221)
+          (_etype1196, _size1193) = iprot.readListBegin()
+          for _i1197 in xrange(_size1193):
+            _elem1198 = Partition()
+            _elem1198.read(iprot)
+            self.new_parts.append(_elem1198)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29336,8 +29319,8 @@ class alter_partitions_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1222 in self.new_parts:
-        iter1222.write(oprot)
+      for iter1199 in self.new_parts:
+        iter1199.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -29490,11 +29473,11 @@ class alter_partitions_with_environment_context_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.new_parts = []
-          (_etype1226, _size1223) = iprot.readListBegin()
-          for _i1227 in xrange(_size1223):
-            _elem1228 = Partition()
-            _elem1228.read(iprot)
-            self.new_parts.append(_elem1228)
+          (_etype1203, _size1200) = iprot.readListBegin()
+          for _i1204 in xrange(_size1200):
+            _elem1205 = Partition()
+            _elem1205.read(iprot)
+            self.new_parts.append(_elem1205)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29525,8 +29508,8 @@ class alter_partitions_with_environment_context_args:
     if self.new_parts is not None:
       oprot.writeFieldBegin('new_parts', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-      for iter1229 in self.new_parts:
-        iter1229.write(oprot)
+      for iter1206 in self.new_parts:
+        iter1206.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.environment_context is not None:
@@ -29870,10 +29853,10 @@ class rename_partition_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1233, _size1230) = iprot.readListBegin()
-          for _i1234 in xrange(_size1230):
-            _elem1235 = iprot.readString()
-            self.part_vals.append(_elem1235)
+          (_etype1210, _size1207) = iprot.readListBegin()
+          for _i1211 in xrange(_size1207):
+            _elem1212 = iprot.readString()
+            self.part_vals.append(_elem1212)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -29904,8 +29887,8 @@ class rename_partition_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1236 in self.part_vals:
-        oprot.writeString(iter1236)
+      for iter1213 in self.part_vals:
+        oprot.writeString(iter1213)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.new_part is not None:
@@ -30047,10 +30030,10 @@ class partition_name_has_valid_characters_args:
       if fid == 1:
         if ftype == TType.LIST:
           self.part_vals = []
-          (_etype1240, _size1237) = iprot.readListBegin()
-          for _i1241 in xrange(_size1237):
-            _elem1242 = iprot.readString()
-            self.part_vals.append(_elem1242)
+          (_etype1217, _size1214) = iprot.readListBegin()
+          for _i1218 in xrange(_size1214):
+            _elem1219 = iprot.readString()
+            self.part_vals.append(_elem1219)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30072,8 +30055,8 @@ class partition_name_has_valid_characters_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.part_vals))
-      for iter1243 in self.part_vals:
-        oprot.writeString(iter1243)
+      for iter1220 in self.part_vals:
+        oprot.writeString(iter1220)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.throw_exception is not None:
@@ -30431,10 +30414,10 @@ class partition_name_to_vals_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1247, _size1244) = iprot.readListBegin()
-          for _i1248 in xrange(_size1244):
-            _elem1249 = iprot.readString()
-            self.success.append(_elem1249)
+          (_etype1224, _size1221) = iprot.readListBegin()
+          for _i1225 in xrange(_size1221):
+            _elem1226 = iprot.readString()
+            self.success.append(_elem1226)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -30457,8 +30440,8 @@ class partition_name_to_vals_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1250 in self.success:
-        oprot.writeString(iter1250)
+      for iter1227 in self.success:
+        oprot.writeString(iter1227)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30582,11 +30565,11 @@ class partition_name_to_spec_result:
       if fid == 0:
         if ftype == TType.MAP:
           self.success = {}
-          (_ktype1252, _vtype1253, _size1251 ) = iprot.readMapBegin()
-          for _i1255 in xrange(_size1251):
-            _key1256 = iprot.readString()
-            _val1257 = iprot.readString()
-            self.success[_key1256] = _val1257
+          (_ktype1229, _vtype1230, _size1228 ) = iprot.readMapBegin()
+          for _i1232 in xrange(_size1228):
+            _key1233 = iprot.readString()
+            _val1234 = iprot.readString()
+            self.success[_key1233] = _val1234
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -30609,9 +30592,9 @@ class partition_name_to_spec_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.MAP, 0)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
-      for kiter1258,viter1259 in self.success.items():
-        oprot.writeString(kiter1258)
-        oprot.writeString(viter1259)
+      for kiter1235,viter1236 in self.success.items():
+        oprot.writeString(kiter1235)
+        oprot.writeString(viter1236)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -30687,11 +30670,11 @@ class markPartitionForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype1261, _vtype1262, _size1260 ) = iprot.readMapBegin()
-          for _i1264 in xrange(_size1260):
-            _key1265 = iprot.readString()
-            _val1266 = iprot.readString()
-            self.part_vals[_key1265] = _val1266
+          (_ktype1238, _vtype1239, _size1237 ) = iprot.readMapBegin()
+          for _i1241 in xrange(_size1237):
+            _key1242 = iprot.readString()
+            _val1243 = iprot.readString()
+            self.part_vals[_key1242] = _val1243
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -30721,9 +30704,9 @@ class markPartitionForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1267,viter1268 in self.part_vals.items():
-        oprot.writeString(kiter1267)
-        oprot.writeString(viter1268)
+      for kiter1244,viter1245 in self.part_vals.items():
+        oprot.writeString(kiter1244)
+        oprot.writeString(viter1245)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -30937,11 +30920,11 @@ class isPartitionMarkedForEvent_args:
       elif fid == 3:
         if ftype == TType.MAP:
           self.part_vals = {}
-          (_ktype1270, _vtype1271, _size1269 ) = iprot.readMapBegin()
-          for _i1273 in xrange(_size1269):
-            _key1274 = iprot.readString()
-            _val1275 = iprot.readString()
-            self.part_vals[_key1274] = _val1275
+          (_ktype1247, _vtype1248, _size1246 ) = iprot.readMapBegin()
+          for _i1250 in xrange(_size1246):
+            _key1251 = iprot.readString()
+            _val1252 = iprot.readString()
+            self.part_vals[_key1251] = _val1252
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -30971,9 +30954,9 @@ class isPartitionMarkedForEvent_args:
     if self.part_vals is not None:
       oprot.writeFieldBegin('part_vals', TType.MAP, 3)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals))
-      for kiter1276,viter1277 in self.part_vals.items():
-        oprot.writeString(kiter1276)
-        oprot.writeString(viter1277)
+      for kiter1253,viter1254 in self.part_vals.items():
+        oprot.writeString(kiter1253)
+        oprot.writeString(viter1254)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.eventType is not None:
@@ -34625,10 +34608,10 @@ class get_functions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1281, _size1278) = iprot.readListBegin()
-          for _i1282 in xrange(_size1278):
-            _elem1283 = iprot.readString()
-            self.success.append(_elem1283)
+          (_etype1258, _size1255) = iprot.readListBegin()
+          for _i1259 in xrange(_size1255):
+            _elem1260 = iprot.readString()
+            self.success.append(_elem1260)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -34651,8 +34634,8 @@ class get_functions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1284 in self.success:
-        oprot.writeString(iter1284)
+      for iter1261 in self.success:
+        oprot.writeString(iter1261)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -35340,10 +35323,10 @@ class get_role_names_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1288, _size1285) = iprot.readListBegin()
-          for _i1289 in xrange(_size1285):
-            _elem1290 = iprot.readString()
-            self.success.append(_elem1290)
+          (_etype1265, _size1262) = iprot.readListBegin()
+          for _i1266 in xrange(_size1262):
+            _elem1267 = iprot.readString()
+            self.success.append(_elem1267)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -35366,8 +35349,8 @@ class get_role_names_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1291 in self.success:
-        oprot.writeString(iter1291)
+      for iter1268 in self.success:
+        oprot.writeString(iter1268)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -35881,11 +35864,11 @@ class list_roles_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1295, _size1292) = iprot.readListBegin()
-          for _i1296 in xrange(_size1292):
-            _elem1297 = Role()
-            _elem1297.read(iprot)
-            self.success.append(_elem1297)
+          (_etype1272, _size1269) = iprot.readListBegin()
+          for _i1273 in xrange(_size1269):
+            _elem1274 = Role()
+            _elem1274.read(iprot)
+            self.success.append(_elem1274)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -35908,8 +35891,8 @@ class list_roles_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1298 in self.success:
-        iter1298.write(oprot)
+      for iter1275 in self.success:
+        iter1275.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -36418,10 +36401,10 @@ class get_privilege_set_args:
       elif fid == 3:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1302, _size1299) = iprot.readListBegin()
-          for _i1303 in xrange(_size1299):
-            _elem1304 = iprot.readString()
-            self.group_names.append(_elem1304)
+          (_etype1279, _size1276) = iprot.readListBegin()
+          for _i1280 in xrange(_size1276):
+            _elem1281 = iprot.readString()
+            self.group_names.append(_elem1281)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -36446,8 +36429,8 @@ class get_privilege_set_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 3)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1305 in self.group_names:
-        oprot.writeString(iter1305)
+      for iter1282 in self.group_names:
+        oprot.writeString(iter1282)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -36674,11 +36657,11 @@ class list_privileges_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1309, _size1306) = iprot.readListBegin()
-          for _i1310 in xrange(_size1306):
-            _elem1311 = HiveObjectPrivilege()
-            _elem1311.read(iprot)
-            self.success.append(_elem1311)
+          (_etype1286, _size1283) = iprot.readListBegin()
+          for _i1287 in xrange(_size1283):
+            _elem1288 = HiveObjectPrivilege()
+            _elem1288.read(iprot)
+            self.success.append(_elem1288)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -36701,8 +36684,8 @@ class list_privileges_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1312 in self.success:
-        iter1312.write(oprot)
+      for iter1289 in self.success:
+        iter1289.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -37372,10 +37355,10 @@ class set_ugi_args:
       elif fid == 2:
         if ftype == TType.LIST:
           self.group_names = []
-          (_etype1316, _size1313) = iprot.readListBegin()
-          for _i1317 in xrange(_size1313):
-            _elem1318 = iprot.readString()
-            self.group_names.append(_elem1318)
+          (_etype1293, _size1290) = iprot.readListBegin()
+          for _i1294 in xrange(_size1290):
+            _elem1295 = iprot.readString()
+            self.group_names.append(_elem1295)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -37396,8 +37379,8 @@ class set_ugi_args:
     if self.group_names is not None:
       oprot.writeFieldBegin('group_names', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.group_names))
-      for iter1319 in self.group_names:
-        oprot.writeString(iter1319)
+      for iter1296 in self.group_names:
+        oprot.writeString(iter1296)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -37452,10 +37435,10 @@ class set_ugi_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1323, _size1320) = iprot.readListBegin()
-          for _i1324 in xrange(_size1320):
-            _elem1325 = iprot.readString()
-            self.success.append(_elem1325)
+          (_etype1300, _size1297) = iprot.readListBegin()
+          for _i1301 in xrange(_size1297):
+            _elem1302 = iprot.readString()
+            self.success.append(_elem1302)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -37478,8 +37461,8 @@ class set_ugi_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1326 in self.success:
-        oprot.writeString(iter1326)
+      for iter1303 in self.success:
+        oprot.writeString(iter1303)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -38411,10 +38394,10 @@ class get_all_token_identifiers_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1330, _size1327) = iprot.readListBegin()
-          for _i1331 in xrange(_size1327):
-            _elem1332 = iprot.readString()
-            self.success.append(_elem1332)
+          (_etype1307, _size1304) = iprot.readListBegin()
+          for _i1308 in xrange(_size1304):
+            _elem1309 = iprot.readString()
+            self.success.append(_elem1309)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -38431,8 +38414,8 @@ class get_all_token_identifiers_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1333 in self.success:
-        oprot.writeString(iter1333)
+      for iter1310 in self.success:
+        oprot.writeString(iter1310)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -38959,10 +38942,10 @@ class get_master_keys_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1337, _size1334) = iprot.readListBegin()
-          for _i1338 in xrange(_size1334):
-            _elem1339 = iprot.readString()
-            self.success.append(_elem1339)
+          (_etype1314, _size1311) = iprot.readListBegin()
+          for _i1315 in xrange(_size1311):
+            _elem1316 = iprot.readString()
+            self.success.append(_elem1316)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -38979,8 +38962,8 @@ class get_master_keys_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRING, len(self.success))
-      for iter1340 in self.success:
-        oprot.writeString(iter1340)
+      for iter1317 in self.success:
+        oprot.writeString(iter1317)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -47387,11 +47370,11 @@ class get_schema_all_versions_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1344, _size1341) = iprot.readListBegin()
-          for _i1345 in xrange(_size1341):
-            _elem1346 = SchemaVersion()
-            _elem1346.read(iprot)
-            self.success.append(_elem1346)
+          (_etype1321, _size1318) = iprot.readListBegin()
+          for _i1322 in xrange(_size1318):
+            _elem1323 = SchemaVersion()
+            _elem1323.read(iprot)
+            self.success.append(_elem1323)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -47420,8 +47403,8 @@ class get_schema_all_versions_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1347 in self.success:
-        iter1347.write(oprot)
+      for iter1324 in self.success:
+        iter1324.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:
@@ -48896,11 +48879,11 @@ class get_runtime_stats_result:
       if fid == 0:
         if ftype == TType.LIST:
           self.success = []
-          (_etype1351, _size1348) = iprot.readListBegin()
-          for _i1352 in xrange(_size1348):
-            _elem1353 = RuntimeStat()
-            _elem1353.read(iprot)
-            self.success.append(_elem1353)
+          (_etype1328, _size1325) = iprot.readListBegin()
+          for _i1329 in xrange(_size1325):
+            _elem1330 = RuntimeStat()
+            _elem1330.read(iprot)
+            self.success.append(_elem1330)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -48923,8 +48906,8 @@ class get_runtime_stats_result:
     if self.success is not None:
       oprot.writeFieldBegin('success', TType.LIST, 0)
       oprot.writeListBegin(TType.STRUCT, len(self.success))
-      for iter1354 in self.success:
-        iter1354.write(oprot)
+      for iter1331 in self.success:
+        iter1331.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.o1 is not None:


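None of the hunks above change behaviour. The Thrift compiler numbers the temporaries in the generated Python module (_elemNNNN, _iterNNNN, _etypeNNNN, ...) consecutively across the whole file, so removing code earlier in the same file, here apparently the old get_materialization_invalidation_info args/result handling, shifts every later counter, uniformly by 23 in these hunks. The read/write shape itself is untouched; a minimal sketch of the shared list-reading pattern, where the helper name read_string_list and the iprot protocol object are illustrative assumptions rather than part of the generated code:

    def read_string_list(iprot):
        # Same shape as the generated readers above; struct-valued lists differ
        # only in that each element is built with Elem() and elem.read(iprot)
        # instead of iprot.readString().
        (_etype, size) = iprot.readListBegin()   # element type tag and element count
        values = []
        for _ in range(size):
            values.append(iprot.readString())
        iprot.readListEnd()
        return values
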
[20/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 93b5780..ae12471 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -20999,6 +20999,10 @@ class CreationMetadata {
    * @var string
    */
   public $validTxnList = null;
+  /**
+   * @var int
+   */
+  public $materializationTime = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
@@ -21027,6 +21031,10 @@ class CreationMetadata {
           'var' => 'validTxnList',
           'type' => TType::STRING,
           ),
+        6 => array(
+          'var' => 'materializationTime',
+          'type' => TType::I64,
+          ),
         );
     }
     if (is_array($vals)) {
@@ -21045,6 +21053,9 @@ class CreationMetadata {
       if (isset($vals['validTxnList'])) {
         $this->validTxnList = $vals['validTxnList'];
       }
+      if (isset($vals['materializationTime'])) {
+        $this->materializationTime = $vals['materializationTime'];
+      }
     }
   }
 
@@ -21116,6 +21127,13 @@ class CreationMetadata {
             $xfer += $input->skip($ftype);
           }
           break;
+        case 6:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->materializationTime);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
         default:
           $xfer += $input->skip($ftype);
           break;
@@ -21170,6 +21188,11 @@ class CreationMetadata {
       $xfer += $output->writeString($this->validTxnList);
       $xfer += $output->writeFieldEnd();
     }
+    if ($this->materializationTime !== null) {
+      $xfer += $output->writeFieldBegin('materializationTime', TType::I64, 6);
+      $xfer += $output->writeI64($this->materializationTime);
+      $xfer += $output->writeFieldEnd();
+    }
     $xfer += $output->writeFieldStop();
     $xfer += $output->writeStructEnd();
     return $xfer;
@@ -24987,18 +25010,6 @@ class Materialization {
   static $_TSPEC;
 
   /**
-   * @var string[]
-   */
-  public $tablesUsed = null;
-  /**
-   * @var string
-   */
-  public $validTxnList = null;
-  /**
-   * @var int
-   */
-  public $invalidationTime = null;
-  /**
    * @var bool
    */
   public $sourceTablesUpdateDeleteModified = null;
@@ -25007,37 +25018,12 @@ class Materialization {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
         1 => array(
-          'var' => 'tablesUsed',
-          'type' => TType::SET,
-          'etype' => TType::STRING,
-          'elem' => array(
-            'type' => TType::STRING,
-            ),
-          ),
-        2 => array(
-          'var' => 'validTxnList',
-          'type' => TType::STRING,
-          ),
-        3 => array(
-          'var' => 'invalidationTime',
-          'type' => TType::I64,
-          ),
-        4 => array(
           'var' => 'sourceTablesUpdateDeleteModified',
           'type' => TType::BOOL,
           ),
         );
     }
     if (is_array($vals)) {
-      if (isset($vals['tablesUsed'])) {
-        $this->tablesUsed = $vals['tablesUsed'];
-      }
-      if (isset($vals['validTxnList'])) {
-        $this->validTxnList = $vals['validTxnList'];
-      }
-      if (isset($vals['invalidationTime'])) {
-        $this->invalidationTime = $vals['invalidationTime'];
-      }
       if (isset($vals['sourceTablesUpdateDeleteModified'])) {
         $this->sourceTablesUpdateDeleteModified = $vals['sourceTablesUpdateDeleteModified'];
       }
@@ -25064,41 +25050,6 @@ class Materialization {
       switch ($fid)
       {
         case 1:
-          if ($ftype == TType::SET) {
-            $this->tablesUsed = array();
-            $_size763 = 0;
-            $_etype766 = 0;
-            $xfer += $input->readSetBegin($_etype766, $_size763);
-            for ($_i767 = 0; $_i767 < $_size763; ++$_i767)
-            {
-              $elem768 = null;
-              $xfer += $input->readString($elem768);
-              if (is_scalar($elem768)) {
-                $this->tablesUsed[$elem768] = true;
-              } else {
-                $this->tablesUsed []= $elem768;
-              }
-            }
-            $xfer += $input->readSetEnd();
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 2:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->validTxnList);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 3:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->invalidationTime);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 4:
           if ($ftype == TType::BOOL) {
             $xfer += $input->readBool($this->sourceTablesUpdateDeleteModified);
           } else {
@@ -25118,39 +25069,8 @@ class Materialization {
   public function write($output) {
     $xfer = 0;
     $xfer += $output->writeStructBegin('Materialization');
-    if ($this->tablesUsed !== null) {
-      if (!is_array($this->tablesUsed)) {
-        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
-      }
-      $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 1);
-      {
-        $output->writeSetBegin(TType::STRING, count($this->tablesUsed));
-        {
-          foreach ($this->tablesUsed as $iter769 => $iter770)
-          {
-            if (is_scalar($iter770)) {
-            $xfer += $output->writeString($iter769);
-            } else {
-            $xfer += $output->writeString($iter770);
-            }
-          }
-        }
-        $output->writeSetEnd();
-      }
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->validTxnList !== null) {
-      $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 2);
-      $xfer += $output->writeString($this->validTxnList);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->invalidationTime !== null) {
-      $xfer += $output->writeFieldBegin('invalidationTime', TType::I64, 3);
-      $xfer += $output->writeI64($this->invalidationTime);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->sourceTablesUpdateDeleteModified !== null) {
-      $xfer += $output->writeFieldBegin('sourceTablesUpdateDeleteModified', TType::BOOL, 4);
+      $xfer += $output->writeFieldBegin('sourceTablesUpdateDeleteModified', TType::BOOL, 1);
       $xfer += $output->writeBool($this->sourceTablesUpdateDeleteModified);
       $xfer += $output->writeFieldEnd();
     }
@@ -26403,15 +26323,15 @@ class WMFullResourcePlan {
         case 2:
           if ($ftype == TType::LST) {
             $this->pools = array();
-            $_size771 = 0;
-            $_etype774 = 0;
-            $xfer += $input->readListBegin($_etype774, $_size771);
-            for ($_i775 = 0; $_i775 < $_size771; ++$_i775)
+            $_size763 = 0;
+            $_etype766 = 0;
+            $xfer += $input->readListBegin($_etype766, $_size763);
+            for ($_i767 = 0; $_i767 < $_size763; ++$_i767)
             {
-              $elem776 = null;
-              $elem776 = new \metastore\WMPool();
-              $xfer += $elem776->read($input);
-              $this->pools []= $elem776;
+              $elem768 = null;
+              $elem768 = new \metastore\WMPool();
+              $xfer += $elem768->read($input);
+              $this->pools []= $elem768;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26421,15 +26341,15 @@ class WMFullResourcePlan {
         case 3:
           if ($ftype == TType::LST) {
             $this->mappings = array();
-            $_size777 = 0;
-            $_etype780 = 0;
-            $xfer += $input->readListBegin($_etype780, $_size777);
-            for ($_i781 = 0; $_i781 < $_size777; ++$_i781)
+            $_size769 = 0;
+            $_etype772 = 0;
+            $xfer += $input->readListBegin($_etype772, $_size769);
+            for ($_i773 = 0; $_i773 < $_size769; ++$_i773)
             {
-              $elem782 = null;
-              $elem782 = new \metastore\WMMapping();
-              $xfer += $elem782->read($input);
-              $this->mappings []= $elem782;
+              $elem774 = null;
+              $elem774 = new \metastore\WMMapping();
+              $xfer += $elem774->read($input);
+              $this->mappings []= $elem774;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26439,15 +26359,15 @@ class WMFullResourcePlan {
         case 4:
           if ($ftype == TType::LST) {
             $this->triggers = array();
-            $_size783 = 0;
-            $_etype786 = 0;
-            $xfer += $input->readListBegin($_etype786, $_size783);
-            for ($_i787 = 0; $_i787 < $_size783; ++$_i787)
+            $_size775 = 0;
+            $_etype778 = 0;
+            $xfer += $input->readListBegin($_etype778, $_size775);
+            for ($_i779 = 0; $_i779 < $_size775; ++$_i779)
             {
-              $elem788 = null;
-              $elem788 = new \metastore\WMTrigger();
-              $xfer += $elem788->read($input);
-              $this->triggers []= $elem788;
+              $elem780 = null;
+              $elem780 = new \metastore\WMTrigger();
+              $xfer += $elem780->read($input);
+              $this->triggers []= $elem780;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26457,15 +26377,15 @@ class WMFullResourcePlan {
         case 5:
           if ($ftype == TType::LST) {
             $this->poolTriggers = array();
-            $_size789 = 0;
-            $_etype792 = 0;
-            $xfer += $input->readListBegin($_etype792, $_size789);
-            for ($_i793 = 0; $_i793 < $_size789; ++$_i793)
+            $_size781 = 0;
+            $_etype784 = 0;
+            $xfer += $input->readListBegin($_etype784, $_size781);
+            for ($_i785 = 0; $_i785 < $_size781; ++$_i785)
             {
-              $elem794 = null;
-              $elem794 = new \metastore\WMPoolTrigger();
-              $xfer += $elem794->read($input);
-              $this->poolTriggers []= $elem794;
+              $elem786 = null;
+              $elem786 = new \metastore\WMPoolTrigger();
+              $xfer += $elem786->read($input);
+              $this->poolTriggers []= $elem786;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26501,9 +26421,9 @@ class WMFullResourcePlan {
       {
         $output->writeListBegin(TType::STRUCT, count($this->pools));
         {
-          foreach ($this->pools as $iter795)
+          foreach ($this->pools as $iter787)
           {
-            $xfer += $iter795->write($output);
+            $xfer += $iter787->write($output);
           }
         }
         $output->writeListEnd();
@@ -26518,9 +26438,9 @@ class WMFullResourcePlan {
       {
         $output->writeListBegin(TType::STRUCT, count($this->mappings));
         {
-          foreach ($this->mappings as $iter796)
+          foreach ($this->mappings as $iter788)
           {
-            $xfer += $iter796->write($output);
+            $xfer += $iter788->write($output);
           }
         }
         $output->writeListEnd();
@@ -26535,9 +26455,9 @@ class WMFullResourcePlan {
       {
         $output->writeListBegin(TType::STRUCT, count($this->triggers));
         {
-          foreach ($this->triggers as $iter797)
+          foreach ($this->triggers as $iter789)
           {
-            $xfer += $iter797->write($output);
+            $xfer += $iter789->write($output);
           }
         }
         $output->writeListEnd();
@@ -26552,9 +26472,9 @@ class WMFullResourcePlan {
       {
         $output->writeListBegin(TType::STRUCT, count($this->poolTriggers));
         {
-          foreach ($this->poolTriggers as $iter798)
+          foreach ($this->poolTriggers as $iter790)
           {
-            $xfer += $iter798->write($output);
+            $xfer += $iter790->write($output);
           }
         }
         $output->writeListEnd();
@@ -27107,15 +27027,15 @@ class WMGetAllResourcePlanResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->resourcePlans = array();
-            $_size799 = 0;
-            $_etype802 = 0;
-            $xfer += $input->readListBegin($_etype802, $_size799);
-            for ($_i803 = 0; $_i803 < $_size799; ++$_i803)
+            $_size791 = 0;
+            $_etype794 = 0;
+            $xfer += $input->readListBegin($_etype794, $_size791);
+            for ($_i795 = 0; $_i795 < $_size791; ++$_i795)
             {
-              $elem804 = null;
-              $elem804 = new \metastore\WMResourcePlan();
-              $xfer += $elem804->read($input);
-              $this->resourcePlans []= $elem804;
+              $elem796 = null;
+              $elem796 = new \metastore\WMResourcePlan();
+              $xfer += $elem796->read($input);
+              $this->resourcePlans []= $elem796;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27143,9 +27063,9 @@ class WMGetAllResourcePlanResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->resourcePlans));
         {
-          foreach ($this->resourcePlans as $iter805)
+          foreach ($this->resourcePlans as $iter797)
           {
-            $xfer += $iter805->write($output);
+            $xfer += $iter797->write($output);
           }
         }
         $output->writeListEnd();
@@ -27551,14 +27471,14 @@ class WMValidateResourcePlanResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->errors = array();
-            $_size806 = 0;
-            $_etype809 = 0;
-            $xfer += $input->readListBegin($_etype809, $_size806);
-            for ($_i810 = 0; $_i810 < $_size806; ++$_i810)
+            $_size798 = 0;
+            $_etype801 = 0;
+            $xfer += $input->readListBegin($_etype801, $_size798);
+            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
             {
-              $elem811 = null;
-              $xfer += $input->readString($elem811);
-              $this->errors []= $elem811;
+              $elem803 = null;
+              $xfer += $input->readString($elem803);
+              $this->errors []= $elem803;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27568,14 +27488,14 @@ class WMValidateResourcePlanResponse {
         case 2:
           if ($ftype == TType::LST) {
             $this->warnings = array();
-            $_size812 = 0;
-            $_etype815 = 0;
-            $xfer += $input->readListBegin($_etype815, $_size812);
-            for ($_i816 = 0; $_i816 < $_size812; ++$_i816)
+            $_size804 = 0;
+            $_etype807 = 0;
+            $xfer += $input->readListBegin($_etype807, $_size804);
+            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
             {
-              $elem817 = null;
-              $xfer += $input->readString($elem817);
-              $this->warnings []= $elem817;
+              $elem809 = null;
+              $xfer += $input->readString($elem809);
+              $this->warnings []= $elem809;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27603,9 +27523,9 @@ class WMValidateResourcePlanResponse {
       {
         $output->writeListBegin(TType::STRING, count($this->errors));
         {
-          foreach ($this->errors as $iter818)
+          foreach ($this->errors as $iter810)
           {
-            $xfer += $output->writeString($iter818);
+            $xfer += $output->writeString($iter810);
           }
         }
         $output->writeListEnd();
@@ -27620,9 +27540,9 @@ class WMValidateResourcePlanResponse {
       {
         $output->writeListBegin(TType::STRING, count($this->warnings));
         {
-          foreach ($this->warnings as $iter819)
+          foreach ($this->warnings as $iter811)
           {
-            $xfer += $output->writeString($iter819);
+            $xfer += $output->writeString($iter811);
           }
         }
         $output->writeListEnd();
@@ -28295,15 +28215,15 @@ class WMGetTriggersForResourePlanResponse {
         case 1:
           if ($ftype == TType::LST) {
             $this->triggers = array();
-            $_size820 = 0;
-            $_etype823 = 0;
-            $xfer += $input->readListBegin($_etype823, $_size820);
-            for ($_i824 = 0; $_i824 < $_size820; ++$_i824)
+            $_size812 = 0;
+            $_etype815 = 0;
+            $xfer += $input->readListBegin($_etype815, $_size812);
+            for ($_i816 = 0; $_i816 < $_size812; ++$_i816)
             {
-              $elem825 = null;
-              $elem825 = new \metastore\WMTrigger();
-              $xfer += $elem825->read($input);
-              $this->triggers []= $elem825;
+              $elem817 = null;
+              $elem817 = new \metastore\WMTrigger();
+              $xfer += $elem817->read($input);
+              $this->triggers []= $elem817;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28331,9 +28251,9 @@ class WMGetTriggersForResourePlanResponse {
       {
         $output->writeListBegin(TType::STRUCT, count($this->triggers));
         {
-          foreach ($this->triggers as $iter826)
+          foreach ($this->triggers as $iter818)
           {
-            $xfer += $iter826->write($output);
+            $xfer += $iter818->write($output);
           }
         }
         $output->writeListEnd();
@@ -29917,15 +29837,15 @@ class SchemaVersion {
         case 4:
           if ($ftype == TType::LST) {
             $this->cols = array();
-            $_size827 = 0;
-            $_etype830 = 0;
-            $xfer += $input->readListBegin($_etype830, $_size827);
-            for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
+            $_size819 = 0;
+            $_etype822 = 0;
+            $xfer += $input->readListBegin($_etype822, $_size819);
+            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
             {
-              $elem832 = null;
-              $elem832 = new \metastore\FieldSchema();
-              $xfer += $elem832->read($input);
-              $this->cols []= $elem832;
+              $elem824 = null;
+              $elem824 = new \metastore\FieldSchema();
+              $xfer += $elem824->read($input);
+              $this->cols []= $elem824;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30014,9 +29934,9 @@ class SchemaVersion {
       {
         $output->writeListBegin(TType::STRUCT, count($this->cols));
         {
-          foreach ($this->cols as $iter833)
+          foreach ($this->cols as $iter825)
           {
-            $xfer += $iter833->write($output);
+            $xfer += $iter825->write($output);
           }
         }
         $output->writeListEnd();
@@ -30338,15 +30258,15 @@ class FindSchemasByColsResp {
         case 1:
           if ($ftype == TType::LST) {
             $this->schemaVersions = array();
-            $_size834 = 0;
-            $_etype837 = 0;
-            $xfer += $input->readListBegin($_etype837, $_size834);
-            for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
+            $_size826 = 0;
+            $_etype829 = 0;
+            $xfer += $input->readListBegin($_etype829, $_size826);
+            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
             {
-              $elem839 = null;
-              $elem839 = new \metastore\SchemaVersionDescriptor();
-              $xfer += $elem839->read($input);
-              $this->schemaVersions []= $elem839;
+              $elem831 = null;
+              $elem831 = new \metastore\SchemaVersionDescriptor();
+              $xfer += $elem831->read($input);
+              $this->schemaVersions []= $elem831;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30374,9 +30294,9 @@ class FindSchemasByColsResp {
       {
         $output->writeListBegin(TType::STRUCT, count($this->schemaVersions));
         {
-          foreach ($this->schemaVersions as $iter840)
+          foreach ($this->schemaVersions as $iter832)
           {
-            $xfer += $iter840->write($output);
+            $xfer += $iter832->write($output);
           }
         }
         $output->writeListEnd();

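In short, the struct changes in this file are: CreationMetadata gains an optional i64 materializationTime (field 6), and Materialization is cut down to the single bool sourceTablesUpdateDeleteModified, which moves from field id 4 to field id 1. A rough sketch of what the reshaped structs look like from a Python client built against the regenerated gen-py bindings; the module path, field names and values below follow the generated ttypes conventions and are illustrative, not taken from the patch:

    from hive_metastore.ttypes import CreationMetadata, Materialization

    # CreationMetadata now also carries a materialization time, presumably the
    # time the view was created or last rebuilt.
    cm = CreationMetadata(catName='hive',
                          dbName='default',
                          tblName='mv1',
                          tablesUsed={'default.src'},
                          validTxnList=None,          # filled in by the metastore
                          materializationTime=None)   # new optional i64 field

    # Materialization keeps only the update/delete-modification flag.
    mat = Materialization(sourceTablesUpdateDeleteModified=False)
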
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index add9197..e5d943d 100755
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -67,7 +67,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('   get_table_objects_by_name(string dbname,  tbl_names)')
   print('  GetTableResult get_table_req(GetTableRequest req)')
   print('  GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)')
-  print('   get_materialization_invalidation_info(string dbname,  tbl_names)')
+  print('  Materialization get_materialization_invalidation_info(CreationMetadata creation_metadata, string validTxnList)')
   print('  void update_creation_metadata(string catName, string dbname, string tbl_name, CreationMetadata creation_metadata)')
   print('   get_table_names_by_filter(string dbname, string filter, i16 max_tables)')
   print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
@@ -563,7 +563,7 @@ elif cmd == 'get_materialization_invalidation_info':
   if len(args) != 2:
     print('get_materialization_invalidation_info requires 2 args')
     sys.exit(1)
-  pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),))
+  pp.pprint(client.get_materialization_invalidation_info(eval(args[0]),args[1],))
 
 elif cmd == 'update_creation_metadata':
   if len(args) != 4:


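The remote helper reflects the reworked service API: get_materialization_invalidation_info no longer takes a database name plus a list of table names, and no longer returns a map keyed by table name; it now takes the materialized view's CreationMetadata together with the caller's validTxnList string and returns a single Materialization. A hedged usage sketch against a connected ThriftHiveMetastore.Client; the names client, cm and valid_txn_list are assumptions, not part of the patch:

    # cm: the CreationMetadata of one materialized view (e.g. from its Table object)
    # valid_txn_list: the caller's current transaction snapshot, serialized as a string
    mat = client.get_materialization_invalidation_info(cm, valid_txn_list)
    if mat.sourceTablesUpdateDeleteModified:
        # a source table saw update/delete operations since the snapshot in cm,
        # so the materialization may need a rebuild before it can be reused
        pass

From the command line the same call now expects a CreationMetadata literal and a string, matching the eval(args[0]), args[1] invocation above.
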
[12/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query29.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query29.q.out b/ql/src/test/results/clientpositive/perf/tez/query29.q.out
index 46ff49d..791ddb6 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query29.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query29.q.out
@@ -117,116 +117,116 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_257]
-        Limit [LIM_256] (rows=100 width=88)
+      File Output Operator [FS_259]
+        Limit [LIM_258] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_255] (rows=463823414 width=88)
+          Select Operator [SEL_257] (rows=463823414 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_254]
-              Group By Operator [GBY_253] (rows=463823414 width=88)
+            SHUFFLE [RS_256]
+              Group By Operator [GBY_255] (rows=463823414 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_49]
                   PartitionCols:_col0, _col1, _col2, _col3
                   Group By Operator [GBY_48] (rows=927646829 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col14)","sum(_col22)","sum(_col3)"],keys:_col7, _col8, _col27, _col28
-                    Merge Join Operator [MERGEJOIN_202] (rows=927646829 width=88)
+                    Merge Join Operator [MERGEJOIN_204] (rows=927646829 width=88)
                       Conds:RS_44._col1, _col2=RS_45._col14, _col13(Inner),Output:["_col3","_col7","_col8","_col14","_col22","_col27","_col28"]
                     <-Reducer 12 [SIMPLE_EDGE]
                       SHUFFLE [RS_45]
                         PartitionCols:_col14, _col13
                         Select Operator [SEL_40] (rows=843315281 width=88)
                           Output:["_col1","_col2","_col8","_col13","_col14","_col16","_col21","_col22"]
-                          Merge Join Operator [MERGEJOIN_201] (rows=843315281 width=88)
-                            Conds:RS_37._col3=RS_244._col0(Inner),Output:["_col5","_col10","_col11","_col13","_col18","_col19","_col21","_col22"]
+                          Merge Join Operator [MERGEJOIN_203] (rows=843315281 width=88)
+                            Conds:RS_37._col3=RS_246._col0(Inner),Output:["_col5","_col10","_col11","_col13","_col18","_col19","_col21","_col22"]
                           <-Map 22 [SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_244]
+                            PARTITION_ONLY_SHUFFLE [RS_246]
                               PartitionCols:_col0
-                              Select Operator [SEL_243] (rows=1704 width=1910)
+                              Select Operator [SEL_245] (rows=1704 width=1910)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_242] (rows=1704 width=1910)
+                                Filter Operator [FIL_244] (rows=1704 width=1910)
                                   predicate:s_store_sk is not null
                                   TableScan [TS_25] (rows=1704 width=1910)
                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
                           <-Reducer 11 [SIMPLE_EDGE]
                             SHUFFLE [RS_37]
                               PartitionCols:_col3
-                              Merge Join Operator [MERGEJOIN_200] (rows=766650239 width=88)
-                                Conds:RS_34._col1=RS_236._col0(Inner),Output:["_col3","_col5","_col10","_col11","_col13","_col18","_col19"]
+                              Merge Join Operator [MERGEJOIN_202] (rows=766650239 width=88)
+                                Conds:RS_34._col1=RS_238._col0(Inner),Output:["_col3","_col5","_col10","_col11","_col13","_col18","_col19"]
                               <-Map 20 [SIMPLE_EDGE] vectorized
-                                PARTITION_ONLY_SHUFFLE [RS_236]
+                                PARTITION_ONLY_SHUFFLE [RS_238]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_235] (rows=462000 width=1436)
+                                  Select Operator [SEL_237] (rows=462000 width=1436)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_234] (rows=462000 width=1436)
+                                    Filter Operator [FIL_236] (rows=462000 width=1436)
                                       predicate:i_item_sk is not null
                                       TableScan [TS_22] (rows=462000 width=1436)
                                         default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
                               <-Reducer 10 [SIMPLE_EDGE]
                                 SHUFFLE [RS_34]
                                   PartitionCols:_col1
-                                  Merge Join Operator [MERGEJOIN_199] (rows=696954748 width=88)
+                                  Merge Join Operator [MERGEJOIN_201] (rows=696954748 width=88)
                                     Conds:RS_31._col1, _col2, _col4=RS_32._col1, _col2, _col3(Inner),Output:["_col1","_col3","_col5","_col10","_col11","_col13"]
                                   <-Reducer 15 [SIMPLE_EDGE]
                                     PARTITION_ONLY_SHUFFLE [RS_32]
                                       PartitionCols:_col1, _col2, _col3
-                                      Merge Join Operator [MERGEJOIN_198] (rows=63350266 width=77)
-                                        Conds:RS_227._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                      Merge Join Operator [MERGEJOIN_200] (rows=63350266 width=77)
+                                        Conds:RS_229._col0=RS_222._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                                       <-Map 13 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_220]
+                                        SHUFFLE [RS_222]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_217] (rows=4058 width=1119)
+                                          Select Operator [SEL_219] (rows=4058 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_215] (rows=4058 width=1119)
+                                            Filter Operator [FIL_217] (rows=4058 width=1119)
                                               predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 4 AND 7)
                                               TableScan [TS_9] (rows=73049 width=1119)
                                                 default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                       <-Map 19 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_227]
+                                        SHUFFLE [RS_229]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_226] (rows=57591150 width=77)
+                                          Select Operator [SEL_228] (rows=57591150 width=77)
                                             Output:["_col0","_col1","_col2","_col3","_col4"]
-                                            Filter Operator [FIL_225] (rows=57591150 width=77)
+                                            Filter Operator [FIL_227] (rows=57591150 width=77)
                                               predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
                                               TableScan [TS_12] (rows=57591150 width=77)
                                                 default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
                                   <-Reducer 9 [SIMPLE_EDGE]
                                     SHUFFLE [RS_31]
                                       PartitionCols:_col1, _col2, _col4
-                                      Merge Join Operator [MERGEJOIN_197] (rows=633595212 width=88)
-                                        Conds:RS_252._col0=RS_218._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                      Merge Join Operator [MERGEJOIN_199] (rows=633595212 width=88)
+                                        Conds:RS_254._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
                                       <-Map 13 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_218]
+                                        SHUFFLE [RS_220]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_216] (rows=18262 width=1119)
+                                          Select Operator [SEL_218] (rows=18262 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_214] (rows=18262 width=1119)
+                                            Filter Operator [FIL_216] (rows=18262 width=1119)
                                               predicate:((d_moy = 4) and (d_year = 1999) and d_date_sk is not null)
                                                Please refer to the previous TableScan [TS_9]
                                       <-Map 8 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_252]
+                                        SHUFFLE [RS_254]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_251] (rows=575995635 width=88)
+                                          Select Operator [SEL_253] (rows=575995635 width=88)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_250] (rows=575995635 width=88)
+                                            Filter Operator [FIL_252] (rows=575995635 width=88)
                                              predicate:((ss_customer_sk BETWEEN DynamicValue(RS_32_store_returns_sr_customer_sk_min) AND DynamicValue(RS_32_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_32_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_32_store_returns_sr_item_sk_min) AND DynamicValue(RS_32_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_32_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_35_item_i_item_sk_min) AND DynamicValue(RS_35_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_35_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_29_d1_d_date_sk_min) AND DynamicValue(RS_29_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_29_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_38_store_s_store_sk_min) AND DynamicValue(RS_38_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_38_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_32_store_returns_sr_ticket_number_min) AND DynamicValue(RS_32_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_32_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                               TableScan [TS_6] (rows=575995635 width=88)
                                                 default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
                                               <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_224]
-                                                  Group By Operator [GBY_223] (rows=1 width=12)
+                                                BROADCAST [RS_226]
+                                                  Group By Operator [GBY_225] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_222]
-                                                      Group By Operator [GBY_221] (rows=1 width=12)
+                                                    SHUFFLE [RS_224]
+                                                      Group By Operator [GBY_223] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_219] (rows=18262 width=1119)
+                                                        Select Operator [SEL_221] (rows=18262 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_216]
+                                                           Please refer to the previous Select Operator [SEL_218]
                                               <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_229]
-                                                  Group By Operator [GBY_228] (rows=1 width=12)
+                                                BROADCAST [RS_231]
+                                                  Group By Operator [GBY_230] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                   <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
                                                     PARTITION_ONLY_SHUFFLE [RS_120]
@@ -234,10 +234,10 @@ Stage-0
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
                                                         Select Operator [SEL_118] (rows=63350266 width=77)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_198]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
                                               <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_231]
-                                                  Group By Operator [GBY_230] (rows=1 width=12)
+                                                BROADCAST [RS_233]
+                                                  Group By Operator [GBY_232] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                   <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
                                                     PARTITION_ONLY_SHUFFLE [RS_125]
@@ -245,10 +245,10 @@ Stage-0
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
                                                         Select Operator [SEL_123] (rows=63350266 width=77)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_198]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
                                               <-Reducer 18 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_233]
-                                                  Group By Operator [GBY_232] (rows=1 width=12)
+                                                BROADCAST [RS_235]
+                                                  Group By Operator [GBY_234] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                   <-Reducer 15 [CUSTOM_SIMPLE_EDGE]
                                                     PARTITION_ONLY_SHUFFLE [RS_130]
@@ -256,61 +256,61 @@ Stage-0
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
                                                         Select Operator [SEL_128] (rows=63350266 width=77)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_198]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_200]
                                               <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_241]
-                                                  Group By Operator [GBY_240] (rows=1 width=12)
+                                                BROADCAST [RS_243]
+                                                  Group By Operator [GBY_242] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_239]
-                                                      Group By Operator [GBY_238] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_241]
+                                                      Group By Operator [GBY_240] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_237] (rows=462000 width=1436)
+                                                        Select Operator [SEL_239] (rows=462000 width=1436)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_235]
+                                                           Please refer to the previous Select Operator [SEL_237]
                                               <-Reducer 23 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_249]
-                                                  Group By Operator [GBY_248] (rows=1 width=12)
+                                                BROADCAST [RS_251]
+                                                  Group By Operator [GBY_250] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_247]
-                                                      Group By Operator [GBY_246] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_249]
+                                                      Group By Operator [GBY_248] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_245] (rows=1704 width=1910)
+                                                        Select Operator [SEL_247] (rows=1704 width=1910)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_243]
+                                                           Please refer to the previous Select Operator [SEL_245]
                     <-Reducer 2 [SIMPLE_EDGE]
                       SHUFFLE [RS_44]
                         PartitionCols:_col1, _col2
-                        Merge Join Operator [MERGEJOIN_196] (rows=316788826 width=135)
-                          Conds:RS_213._col0=RS_205._col0(Inner),Output:["_col1","_col2","_col3"]
+                        Merge Join Operator [MERGEJOIN_198] (rows=316788826 width=135)
+                          Conds:RS_215._col0=RS_207._col0(Inner),Output:["_col1","_col2","_col3"]
                         <-Map 6 [SIMPLE_EDGE] vectorized
-                          PARTITION_ONLY_SHUFFLE [RS_205]
+                          PARTITION_ONLY_SHUFFLE [RS_207]
                             PartitionCols:_col0
-                            Select Operator [SEL_204] (rows=36525 width=1119)
+                            Select Operator [SEL_206] (rows=36525 width=1119)
                               Output:["_col0"]
-                              Filter Operator [FIL_203] (rows=36525 width=1119)
+                              Filter Operator [FIL_205] (rows=36525 width=1119)
                                 predicate:((d_year) IN (1999, 2000, 2001) and d_date_sk is not null)
                                 TableScan [TS_3] (rows=73049 width=1119)
                                   default@date_dim,d3,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
                         <-Map 1 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_213]
+                          SHUFFLE [RS_215]
                             PartitionCols:_col0
-                            Select Operator [SEL_212] (rows=287989836 width=135)
+                            Select Operator [SEL_214] (rows=287989836 width=135)
                               Output:["_col0","_col1","_col2","_col3"]
-                              Filter Operator [FIL_211] (rows=287989836 width=135)
+                              Filter Operator [FIL_213] (rows=287989836 width=135)
                                 predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_42_d3_d_date_sk_min) AND DynamicValue(RS_42_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_42_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                 TableScan [TS_0] (rows=287989836 width=135)
                                   default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
                                 <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_210]
-                                    Group By Operator [GBY_209] (rows=1 width=12)
+                                  BROADCAST [RS_212]
+                                    Group By Operator [GBY_211] (rows=1 width=12)
                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                     <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_208]
-                                        Group By Operator [GBY_207] (rows=1 width=12)
+                                      PARTITION_ONLY_SHUFFLE [RS_210]
+                                        Group By Operator [GBY_209] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_206] (rows=36525 width=1119)
+                                          Select Operator [SEL_208] (rows=36525 width=1119)
                                             Output:["_col0"]
-                                             Please refer to the previous Select Operator [SEL_204]
+                                             Please refer to the previous Select Operator [SEL_206]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query31.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query31.q.out b/ql/src/test/results/clientpositive/perf/tez/query31.q.out
index c4d717d..22aee37 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query31.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query31.q.out
@@ -152,377 +152,377 @@ Stage-0
           Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
           Filter Operator [FIL_130] (rows=287493839 width=88)
             predicate:(CASE WHEN ((_col1 > 0)) THEN (CASE WHEN ((_col9 > 0)) THEN (((_col11 / _col9) > (_col5 / _col1))) ELSE ((null > (_col5 / _col1))) END) ELSE (CASE WHEN ((_col9 > 0)) THEN (((_col11 / _col9) > null)) ELSE (null) END) END and CASE WHEN ((_col3 > 0)) THEN (CASE WHEN ((_col7 > 0)) THEN (((_col9 / _col7) > (_col1 / _col3))) ELSE ((null > (_col1 / _col3))) END) ELSE (CASE WHEN ((_col7 > 0)) THEN (((_col9 / _col7) > null)) ELSE (null) END) END)
-            Merge Join Operator [MERGEJOIN_439] (rows=1149975359 width=88)
-              Conds:RS_510._col0=RS_519._col0(Inner),RS_510._col0=RS_528._col0(Inner),RS_510._col0=RS_128._col0(Inner),Output:["_col0","_col1","_col3","_col5","_col7","_col9","_col11"]
+            Merge Join Operator [MERGEJOIN_448] (rows=1149975359 width=88)
+              Conds:RS_519._col0=RS_528._col0(Inner),RS_519._col0=RS_537._col0(Inner),RS_519._col0=RS_128._col0(Inner),Output:["_col0","_col1","_col3","_col5","_col7","_col9","_col11"]
             <-Reducer 10 [ONE_TO_ONE_EDGE] vectorized
-              FORWARD [RS_519]
+              FORWARD [RS_528]
                 PartitionCols:_col0
-                Group By Operator [GBY_518] (rows=348477374 width=88)
+                Group By Operator [GBY_527] (rows=348477374 width=88)
                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                 <-Reducer 9 [SIMPLE_EDGE]
                   SHUFFLE [RS_37]
                     PartitionCols:_col0
                     Group By Operator [GBY_36] (rows=696954748 width=88)
                       Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                      Merge Join Operator [MERGEJOIN_429] (rows=696954748 width=88)
-                        Conds:RS_32._col1=RS_482._col0(Inner),Output:["_col2","_col7"]
+                      Merge Join Operator [MERGEJOIN_438] (rows=696954748 width=88)
+                        Conds:RS_32._col1=RS_491._col0(Inner),Output:["_col2","_col7"]
                       <-Map 29 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_482]
+                        SHUFFLE [RS_491]
                           PartitionCols:_col0
-                          Select Operator [SEL_479] (rows=40000000 width=1014)
+                          Select Operator [SEL_488] (rows=40000000 width=1014)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_478] (rows=40000000 width=1014)
+                            Filter Operator [FIL_487] (rows=40000000 width=1014)
                               predicate:(ca_address_sk is not null and ca_county is not null)
                               TableScan [TS_6] (rows=40000000 width=1014)
                                 default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county"]
                       <-Reducer 8 [SIMPLE_EDGE]
                         SHUFFLE [RS_32]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_428] (rows=633595212 width=88)
-                            Conds:RS_517._col0=RS_454._col0(Inner),Output:["_col1","_col2"]
+                          Merge Join Operator [MERGEJOIN_437] (rows=633595212 width=88)
+                            Conds:RS_526._col0=RS_463._col0(Inner),Output:["_col1","_col2"]
                           <-Map 6 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_454]
+                            SHUFFLE [RS_463]
                               PartitionCols:_col0
-                              Select Operator [SEL_447] (rows=18262 width=1119)
+                              Select Operator [SEL_456] (rows=18262 width=1119)
                                 Output:["_col0"]
-                                Filter Operator [FIL_441] (rows=18262 width=1119)
+                                Filter Operator [FIL_450] (rows=18262 width=1119)
                                   predicate:((d_qoy = 1) and (d_year = 2000) and d_date_sk is not null)
                                   TableScan [TS_3] (rows=73049 width=1119)
                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                           <-Map 36 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_517]
+                            SHUFFLE [RS_526]
                               PartitionCols:_col0
-                              Select Operator [SEL_516] (rows=575995635 width=88)
+                              Select Operator [SEL_525] (rows=575995635 width=88)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_515] (rows=575995635 width=88)
+                                Filter Operator [FIL_524] (rows=575995635 width=88)
                                   predicate:((ss_addr_sk BETWEEN DynamicValue(RS_33_customer_address_ca_address_sk_min) AND DynamicValue(RS_33_customer_address_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_33_customer_address_ca_address_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_30_date_dim_d_date_sk_min) AND DynamicValue(RS_30_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_30_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is not null and ss_sold_date_sk is not null)
                                   TableScan [TS_20] (rows=575995635 width=88)
                                     default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
                                   <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_512]
-                                      Group By Operator [GBY_511] (rows=1 width=12)
+                                    BROADCAST [RS_521]
+                                      Group By Operator [GBY_520] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                       <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_471]
-                                          Group By Operator [GBY_465] (rows=1 width=12)
+                                        SHUFFLE [RS_480]
+                                          Group By Operator [GBY_474] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                            Select Operator [SEL_455] (rows=18262 width=1119)
+                                            Select Operator [SEL_464] (rows=18262 width=1119)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_447]
+                                               Please refer to the previous Select Operator [SEL_456]
                                   <-Reducer 31 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_514]
-                                      Group By Operator [GBY_513] (rows=1 width=12)
+                                    BROADCAST [RS_523]
+                                      Group By Operator [GBY_522] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                       <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_499]
-                                          Group By Operator [GBY_493] (rows=1 width=12)
+                                        SHUFFLE [RS_508]
+                                          Group By Operator [GBY_502] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                            Select Operator [SEL_483] (rows=40000000 width=1014)
+                                            Select Operator [SEL_492] (rows=40000000 width=1014)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_479]
+                                               Please refer to the previous Select Operator [SEL_488]
             <-Reducer 14 [ONE_TO_ONE_EDGE] vectorized
-              FORWARD [RS_528]
+              FORWARD [RS_537]
                 PartitionCols:_col0
-                Group By Operator [GBY_527] (rows=348477374 width=88)
+                Group By Operator [GBY_536] (rows=348477374 width=88)
                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                 <-Reducer 13 [SIMPLE_EDGE]
                   SHUFFLE [RS_57]
                     PartitionCols:_col0
                     Group By Operator [GBY_56] (rows=696954748 width=88)
                       Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                      Merge Join Operator [MERGEJOIN_431] (rows=696954748 width=88)
-                        Conds:RS_52._col1=RS_484._col0(Inner),Output:["_col2","_col7"]
+                      Merge Join Operator [MERGEJOIN_440] (rows=696954748 width=88)
+                        Conds:RS_52._col1=RS_493._col0(Inner),Output:["_col2","_col7"]
                       <-Map 29 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_484]
+                        SHUFFLE [RS_493]
                           PartitionCols:_col0
-                           Please refer to the previous Select Operator [SEL_479]
+                           Please refer to the previous Select Operator [SEL_488]
                       <-Reducer 12 [SIMPLE_EDGE]
                         SHUFFLE [RS_52]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_430] (rows=633595212 width=88)
-                            Conds:RS_526._col0=RS_456._col0(Inner),Output:["_col1","_col2"]
+                          Merge Join Operator [MERGEJOIN_439] (rows=633595212 width=88)
+                            Conds:RS_535._col0=RS_465._col0(Inner),Output:["_col1","_col2"]
                           <-Map 6 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_456]
+                            SHUFFLE [RS_465]
                               PartitionCols:_col0
-                              Select Operator [SEL_448] (rows=18262 width=1119)
+                              Select Operator [SEL_457] (rows=18262 width=1119)
                                 Output:["_col0"]
-                                Filter Operator [FIL_442] (rows=18262 width=1119)
+                                Filter Operator [FIL_451] (rows=18262 width=1119)
                                   predicate:((d_qoy = 3) and (d_year = 2000) and d_date_sk is not null)
                                    Please refer to the previous TableScan [TS_3]
                           <-Map 37 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_526]
+                            SHUFFLE [RS_535]
                               PartitionCols:_col0
-                              Select Operator [SEL_525] (rows=575995635 width=88)
+                              Select Operator [SEL_534] (rows=575995635 width=88)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_524] (rows=575995635 width=88)
+                                Filter Operator [FIL_533] (rows=575995635 width=88)
                                   predicate:((ss_addr_sk BETWEEN DynamicValue(RS_53_customer_address_ca_address_sk_min) AND DynamicValue(RS_53_customer_address_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_53_customer_address_ca_address_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is not null and ss_sold_date_sk is not null)
                                   TableScan [TS_40] (rows=575995635 width=88)
                                     default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
                                   <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_521]
-                                      Group By Operator [GBY_520] (rows=1 width=12)
+                                    BROADCAST [RS_530]
+                                      Group By Operator [GBY_529] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                       <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_472]
-                                          Group By Operator [GBY_466] (rows=1 width=12)
+                                        SHUFFLE [RS_481]
+                                          Group By Operator [GBY_475] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                            Select Operator [SEL_457] (rows=18262 width=1119)
+                                            Select Operator [SEL_466] (rows=18262 width=1119)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_448]
+                                               Please refer to the previous Select Operator [SEL_457]
                                   <-Reducer 32 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_523]
-                                      Group By Operator [GBY_522] (rows=1 width=12)
+                                    BROADCAST [RS_532]
+                                      Group By Operator [GBY_531] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                       <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_500]
-                                          Group By Operator [GBY_494] (rows=1 width=12)
+                                        SHUFFLE [RS_509]
+                                          Group By Operator [GBY_503] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                            Select Operator [SEL_485] (rows=40000000 width=1014)
+                                            Select Operator [SEL_494] (rows=40000000 width=1014)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_479]
+                                               Please refer to the previous Select Operator [SEL_488]
             <-Reducer 19 [ONE_TO_ONE_EDGE]
               FORWARD [RS_128]
                 PartitionCols:_col0
-                Merge Join Operator [MERGEJOIN_438] (rows=191667561 width=135)
-                  Conds:RS_537._col0=RS_546._col0(Inner),RS_537._col0=RS_555._col0(Inner),Output:["_col0","_col1","_col3","_col5"]
+                Merge Join Operator [MERGEJOIN_447] (rows=191667561 width=135)
+                  Conds:RS_546._col0=RS_555._col0(Inner),RS_546._col0=RS_564._col0(Inner),Output:["_col0","_col1","_col3","_col5"]
                 <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
-                  FORWARD [RS_537]
+                  FORWARD [RS_546]
                     PartitionCols:_col0
-                    Group By Operator [GBY_536] (rows=87121617 width=135)
+                    Group By Operator [GBY_545] (rows=87121617 width=135)
                       Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                     <-Reducer 17 [SIMPLE_EDGE]
                       SHUFFLE [RS_77]
                         PartitionCols:_col0
                         Group By Operator [GBY_76] (rows=174243235 width=135)
                           Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                          Merge Join Operator [MERGEJOIN_433] (rows=174243235 width=135)
-                            Conds:RS_72._col1=RS_486._col0(Inner),Output:["_col2","_col7"]
+                          Merge Join Operator [MERGEJOIN_442] (rows=174243235 width=135)
+                            Conds:RS_72._col1=RS_495._col0(Inner),Output:["_col2","_col7"]
                           <-Map 29 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_486]
+                            SHUFFLE [RS_495]
                               PartitionCols:_col0
-                               Please refer to the previous Select Operator [SEL_479]
+                               Please refer to the previous Select Operator [SEL_488]
                           <-Reducer 16 [SIMPLE_EDGE]
                             SHUFFLE [RS_72]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_432] (rows=158402938 width=135)
-                                Conds:RS_535._col0=RS_458._col0(Inner),Output:["_col1","_col2"]
+                              Merge Join Operator [MERGEJOIN_441] (rows=158402938 width=135)
+                                Conds:RS_544._col0=RS_467._col0(Inner),Output:["_col1","_col2"]
                               <-Map 6 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_458]
+                                SHUFFLE [RS_467]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_449] (rows=18262 width=1119)
+                                  Select Operator [SEL_458] (rows=18262 width=1119)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_443] (rows=18262 width=1119)
+                                    Filter Operator [FIL_452] (rows=18262 width=1119)
                                       predicate:((d_qoy = 1) and (d_year = 2000) and d_date_sk is not null)
                                        Please refer to the previous TableScan [TS_3]
                               <-Map 38 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_535]
+                                SHUFFLE [RS_544]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_534] (rows=144002668 width=135)
+                                  Select Operator [SEL_543] (rows=144002668 width=135)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_533] (rows=144002668 width=135)
+                                    Filter Operator [FIL_542] (rows=144002668 width=135)
                                       predicate:((ws_bill_addr_sk BETWEEN DynamicValue(RS_73_customer_address_ca_address_sk_min) AND DynamicValue(RS_73_customer_address_ca_address_sk_max) and in_bloom_filter(ws_bill_addr_sk, DynamicValue(RS_73_customer_address_ca_address_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_70_date_dim_d_date_sk_min) AND DynamicValue(RS_70_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_70_date_dim_d_date_sk_bloom_filter))) and ws_bill_addr_sk is not null and ws_sold_date_sk is not null)
                                       TableScan [TS_60] (rows=144002668 width=135)
                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
                                       <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_530]
-                                          Group By Operator [GBY_529] (rows=1 width=12)
+                                        BROADCAST [RS_539]
+                                          Group By Operator [GBY_538] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                           <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_473]
-                                              Group By Operator [GBY_467] (rows=1 width=12)
+                                            SHUFFLE [RS_482]
+                                              Group By Operator [GBY_476] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_459] (rows=18262 width=1119)
+                                                Select Operator [SEL_468] (rows=18262 width=1119)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_449]
+                                                   Please refer to the previous Select Operator [SEL_458]
                                       <-Reducer 33 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_532]
-                                          Group By Operator [GBY_531] (rows=1 width=12)
+                                        BROADCAST [RS_541]
+                                          Group By Operator [GBY_540] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                           <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_501]
-                                              Group By Operator [GBY_495] (rows=1 width=12)
+                                            SHUFFLE [RS_510]
+                                              Group By Operator [GBY_504] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                                Select Operator [SEL_487] (rows=40000000 width=1014)
+                                                Select Operator [SEL_496] (rows=40000000 width=1014)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_479]
+                                                   Please refer to the previous Select Operator [SEL_488]
                 <-Reducer 23 [ONE_TO_ONE_EDGE] vectorized
-                  FORWARD [RS_546]
+                  FORWARD [RS_555]
                     PartitionCols:_col0
-                    Group By Operator [GBY_545] (rows=87121617 width=135)
+                    Group By Operator [GBY_554] (rows=87121617 width=135)
                       Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                     <-Reducer 22 [SIMPLE_EDGE]
                       SHUFFLE [RS_97]
                         PartitionCols:_col0
                         Group By Operator [GBY_96] (rows=174243235 width=135)
                           Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                          Merge Join Operator [MERGEJOIN_435] (rows=174243235 width=135)
-                            Conds:RS_92._col1=RS_488._col0(Inner),Output:["_col2","_col7"]
+                          Merge Join Operator [MERGEJOIN_444] (rows=174243235 width=135)
+                            Conds:RS_92._col1=RS_497._col0(Inner),Output:["_col2","_col7"]
                           <-Map 29 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_488]
+                            SHUFFLE [RS_497]
                               PartitionCols:_col0
-                               Please refer to the previous Select Operator [SEL_479]
+                               Please refer to the previous Select Operator [SEL_488]
                           <-Reducer 21 [SIMPLE_EDGE]
                             SHUFFLE [RS_92]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_434] (rows=158402938 width=135)
-                                Conds:RS_544._col0=RS_460._col0(Inner),Output:["_col1","_col2"]
+                              Merge Join Operator [MERGEJOIN_443] (rows=158402938 width=135)
+                                Conds:RS_553._col0=RS_469._col0(Inner),Output:["_col1","_col2"]
                               <-Map 6 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_460]
+                                SHUFFLE [RS_469]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_450] (rows=18262 width=1119)
+                                  Select Operator [SEL_459] (rows=18262 width=1119)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_444] (rows=18262 width=1119)
+                                    Filter Operator [FIL_453] (rows=18262 width=1119)
                                       predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
                                        Please refer to the previous TableScan [TS_3]
                               <-Map 39 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_544]
+                                SHUFFLE [RS_553]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_543] (rows=144002668 width=135)
+                                  Select Operator [SEL_552] (rows=144002668 width=135)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_542] (rows=144002668 width=135)
+                                    Filter Operator [FIL_551] (rows=144002668 width=135)
                                       predicate:((ws_bill_addr_sk BETWEEN DynamicValue(RS_93_customer_address_ca_address_sk_min) AND DynamicValue(RS_93_customer_address_ca_address_sk_max) and in_bloom_filter(ws_bill_addr_sk, DynamicValue(RS_93_customer_address_ca_address_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_90_date_dim_d_date_sk_min) AND DynamicValue(RS_90_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_90_date_dim_d_date_sk_bloom_filter))) and ws_bill_addr_sk is not null and ws_sold_date_sk is not null)
                                       TableScan [TS_80] (rows=144002668 width=135)
                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
                                       <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_539]
-                                          Group By Operator [GBY_538] (rows=1 width=12)
+                                        BROADCAST [RS_548]
+                                          Group By Operator [GBY_547] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                           <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_474]
-                                              Group By Operator [GBY_468] (rows=1 width=12)
+                                            SHUFFLE [RS_483]
+                                              Group By Operator [GBY_477] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_461] (rows=18262 width=1119)
+                                                Select Operator [SEL_470] (rows=18262 width=1119)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_450]
+                                                   Please refer to the previous Select Operator [SEL_459]
                                       <-Reducer 34 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_541]
-                                          Group By Operator [GBY_540] (rows=1 width=12)
+                                        BROADCAST [RS_550]
+                                          Group By Operator [GBY_549] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                           <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_502]
-                                              Group By Operator [GBY_496] (rows=1 width=12)
+                                            SHUFFLE [RS_511]
+                                              Group By Operator [GBY_505] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                                Select Operator [SEL_489] (rows=40000000 width=1014)
+                                                Select Operator [SEL_498] (rows=40000000 width=1014)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_479]
+                                                   Please refer to the previous Select Operator [SEL_488]
                 <-Reducer 27 [ONE_TO_ONE_EDGE] vectorized
-                  FORWARD [RS_555]
+                  FORWARD [RS_564]
                     PartitionCols:_col0
-                    Group By Operator [GBY_554] (rows=87121617 width=135)
+                    Group By Operator [GBY_563] (rows=87121617 width=135)
                       Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                     <-Reducer 26 [SIMPLE_EDGE]
                       SHUFFLE [RS_117]
                         PartitionCols:_col0
                         Group By Operator [GBY_116] (rows=174243235 width=135)
                           Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                          Merge Join Operator [MERGEJOIN_437] (rows=174243235 width=135)
-                            Conds:RS_112._col1=RS_490._col0(Inner),Output:["_col2","_col7"]
+                          Merge Join Operator [MERGEJOIN_446] (rows=174243235 width=135)
+                            Conds:RS_112._col1=RS_499._col0(Inner),Output:["_col2","_col7"]
                           <-Map 29 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_490]
+                            SHUFFLE [RS_499]
                               PartitionCols:_col0
-                               Please refer to the previous Select Operator [SEL_479]
+                               Please refer to the previous Select Operator [SEL_488]
                           <-Reducer 25 [SIMPLE_EDGE]
                             SHUFFLE [RS_112]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_436] (rows=158402938 width=135)
-                                Conds:RS_553._col0=RS_462._col0(Inner),Output:["_col1","_col2"]
+                              Merge Join Operator [MERGEJOIN_445] (rows=158402938 width=135)
+                                Conds:RS_562._col0=RS_471._col0(Inner),Output:["_col1","_col2"]
                               <-Map 6 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_462]
+                                SHUFFLE [RS_471]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_451] (rows=18262 width=1119)
+                                  Select Operator [SEL_460] (rows=18262 width=1119)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_445] (rows=18262 width=1119)
+                                    Filter Operator [FIL_454] (rows=18262 width=1119)
                                       predicate:((d_qoy = 3) and (d_year = 2000) and d_date_sk is not null)
                                        Please refer to the previous TableScan [TS_3]
                               <-Map 40 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_553]
+                                SHUFFLE [RS_562]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_552] (rows=144002668 width=135)
+                                  Select Operator [SEL_561] (rows=144002668 width=135)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_551] (rows=144002668 width=135)
+                                    Filter Operator [FIL_560] (rows=144002668 width=135)
                                       predicate:((ws_bill_addr_sk BETWEEN DynamicValue(RS_113_customer_address_ca_address_sk_min) AND DynamicValue(RS_113_customer_address_ca_address_sk_max) and in_bloom_filter(ws_bill_addr_sk, DynamicValue(RS_113_customer_address_ca_address_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_110_date_dim_d_date_sk_min) AND DynamicValue(RS_110_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_110_date_dim_d_date_sk_bloom_filter))) and ws_bill_addr_sk is not null and ws_sold_date_sk is not null)
                                       TableScan [TS_100] (rows=144002668 width=135)
                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_addr_sk","ws_ext_sales_price"]
                                       <-Reducer 28 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_548]
-                                          Group By Operator [GBY_547] (rows=1 width=12)
+                                        BROADCAST [RS_557]
+                                          Group By Operator [GBY_556] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                           <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_475]
-                                              Group By Operator [GBY_469] (rows=1 width=12)
+                                            SHUFFLE [RS_484]
+                                              Group By Operator [GBY_478] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_463] (rows=18262 width=1119)
+                                                Select Operator [SEL_472] (rows=18262 width=1119)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_451]
+                                                   Please refer to the previous Select Operator [SEL_460]
                                       <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_550]
-                                          Group By Operator [GBY_549] (rows=1 width=12)
+                                        BROADCAST [RS_559]
+                                          Group By Operator [GBY_558] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                           <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_503]
-                                              Group By Operator [GBY_497] (rows=1 width=12)
+                                            SHUFFLE [RS_512]
+                                              Group By Operator [GBY_506] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                                Select Operator [SEL_491] (rows=40000000 width=1014)
+                                                Select Operator [SEL_500] (rows=40000000 width=1014)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_479]
+                                                   Please refer to the previous Select Operator [SEL_488]
             <-Reducer 4 [ONE_TO_ONE_EDGE] vectorized
-              FORWARD [RS_510]
+              FORWARD [RS_519]
                 PartitionCols:_col0
-                Group By Operator [GBY_509] (rows=348477374 width=88)
+                Group By Operator [GBY_518] (rows=348477374 width=88)
                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                 <-Reducer 3 [SIMPLE_EDGE]
                   SHUFFLE [RS_17]
                     PartitionCols:_col0
                     Group By Operator [GBY_16] (rows=696954748 width=88)
                       Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col7
-                      Merge Join Operator [MERGEJOIN_427] (rows=696954748 width=88)
-                        Conds:RS_12._col1=RS_480._col0(Inner),Output:["_col2","_col7"]
+                      Merge Join Operator [MERGEJOIN_436] (rows=696954748 width=88)
+                        Conds:RS_12._col1=RS_489._col0(Inner),Output:["_col2","_col7"]
                       <-Map 29 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_480]
+                        SHUFFLE [RS_489]
                           PartitionCols:_col0
-                           Please refer to the previous Select Operator [SEL_479]
+                           Please refer to the previous Select Operator [SEL_488]
                       <-Reducer 2 [SIMPLE_EDGE]
                         SHUFFLE [RS_12]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_426] (rows=633595212 width=88)
-                            Conds:RS_508._col0=RS_452._col0(Inner),Output:["_col1","_col2"]
+                          Merge Join Operator [MERGEJOIN_435] (rows=633595212 width=88)
+                            Conds:RS_517._col0=RS_461._col0(Inner),Output:["_col1","_col2"]
                           <-Map 6 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_452]
+                            SHUFFLE [RS_461]
                               PartitionCols:_col0
-                              Select Operator [SEL_446] (rows=18262 width=1119)
+                              Select Operator [SEL_455] (rows=18262 width=1119)
                                 Output:["_col0"]
-                                Filter Operator [FIL_440] (rows=18262 width=1119)
+                                Filter Operator [FIL_449] (rows=18262 width=1119)
                                   predicate:((d_qoy = 2) and (d_year = 2000) and d_date_sk is not null)
                                    Please refer to the previous TableScan [TS_3]
                           <-Map 1 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_508]
+                            SHUFFLE [RS_517]
                               PartitionCols:_col0
-                              Select Operator [SEL_507] (rows=575995635 width=88)
+                              Select Operator [SEL_516] (rows=575995635 width=88)
                                 Output:["_col0","_col1","_col2"]
-                                Filter Operator [FIL_506] (rows=575995635 width=88)
+                                Filter Operator [FIL_515] (rows=575995635 width=88)
                                   predicate:((ss_addr_sk BETWEEN DynamicValue(RS_13_customer_address_ca_address_sk_min) AND DynamicValue(RS_13_customer_address_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_13_customer_address_ca_address_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and ss_addr_sk is not null and ss_sold_date_sk is not null)
                                   TableScan [TS_0] (rows=575995635 width=88)
                                     default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_addr_sk","ss_ext_sales_price"]
                                   <-Reducer 30 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_505]
-                                      Group By Operator [GBY_504] (rows=1 width=12)
+                                    BROADCAST [RS_514]
+                                      Group By Operator [GBY_513] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                       <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_498]
-                                          Group By Operator [GBY_492] (rows=1 width=12)
+                                        SHUFFLE [RS_507]
+                                          Group By Operator [GBY_501] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                            Select Operator [SEL_481] (rows=40000000 width=1014)
+                                            Select Operator [SEL_490] (rows=40000000 width=1014)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_479]
+                                               Please refer to the previous Select Operator [SEL_488]
                                   <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                    BROADCAST [RS_477]
-                                      Group By Operator [GBY_476] (rows=1 width=12)
+                                    BROADCAST [RS_486]
+                                      Group By Operator [GBY_485] (rows=1 width=12)
                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                       <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_470]
-                                          Group By Operator [GBY_464] (rows=1 width=12)
+                                        SHUFFLE [RS_479]
+                                          Group By Operator [GBY_473] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                            Select Operator [SEL_453] (rows=18262 width=1119)
+                                            Select Operator [SEL_462] (rows=18262 width=1119)
                                               Output:["_col0"]
-                                               Please refer to the previous Select Operator [SEL_446]
+                                               Please refer to the previous Select Operator [SEL_455]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query32.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query32.q.out b/ql/src/test/results/clientpositive/perf/tez/query32.q.out
index 6be6f7a..2a472c1 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query32.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query32.q.out
@@ -55,27 +55,24 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 1 <- Reducer 6 (BROADCAST_EDGE)
-Map 11 <- Reducer 10 (BROADCAST_EDGE), Reducer 13 (BROADCAST_EDGE)
-Reducer 10 <- Map 5 (CUSTOM_SIMPLE_EDGE)
-Reducer 13 <- Map 12 (CUSTOM_SIMPLE_EDGE)
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
-Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 9 (ONE_TO_ONE_EDGE)
+Map 1 <- Reducer 10 (BROADCAST_EDGE), Reducer 8 (BROADCAST_EDGE)
+Reducer 10 <- Map 9 (CUSTOM_SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 6 (ONE_TO_ONE_EDGE)
 Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
-Reducer 6 <- Map 5 (CUSTOM_SIMPLE_EDGE)
-Reducer 7 <- Map 11 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
-Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
-Reducer 9 <- Map 12 (SIMPLE_EDGE), Reducer 8 (ONE_TO_ONE_EDGE)
+Reducer 5 <- Reducer 2 (SIMPLE_EDGE)
+Reducer 6 <- Map 9 (SIMPLE_EDGE), Reducer 5 (ONE_TO_ONE_EDGE)
+Reducer 8 <- Map 7 (CUSTOM_SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
     limit:100
     Stage-1
       Reducer 4 vectorized
-      File Output Operator [FS_136]
-        Limit [LIM_135] (rows=1 width=112)
+      File Output Operator [FS_129]
+        Limit [LIM_128] (rows=1 width=112)
           Number of rows:100
-          Group By Operator [GBY_134] (rows=1 width=112)
+          Group By Operator [GBY_127] (rows=1 width=112)
             Output:["_col0"],aggregations:["sum(VALUE._col0)"]
           <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
             PARTITION_ONLY_SHUFFLE [RS_36]
@@ -85,103 +82,78 @@ Stage-0
                   Output:["_col2"]
                   Filter Operator [FIL_33] (rows=116155905 width=135)
                     predicate:(_col2 > CAST( (1.3 * _col6) AS decimal(14,7)))
-                    Merge Join Operator [MERGEJOIN_102] (rows=348467716 width=135)
+                    Merge Join Operator [MERGEJOIN_104] (rows=348467716 width=135)
                       Conds:RS_30._col1=RS_31._col2(Inner),Output:["_col2","_col6"]
                     <-Reducer 2 [SIMPLE_EDGE]
                       SHUFFLE [RS_30]
                         PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_99] (rows=316788826 width=135)
-                          Conds:RS_117._col0=RS_105._col0(Inner),Output:["_col1","_col2"]
-                        <-Map 5 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_105]
+                        Merge Join Operator [MERGEJOIN_101] (rows=316788826 width=135)
+                          Conds:RS_123._col0=RS_107._col0(Inner),Output:["_col1","_col2"]
+                        <-Map 7 [SIMPLE_EDGE] vectorized
+                          PARTITION_ONLY_SHUFFLE [RS_107]
                             PartitionCols:_col0
-                            Select Operator [SEL_104] (rows=8116 width=1119)
+                            Select Operator [SEL_106] (rows=8116 width=1119)
                               Output:["_col0"]
-                              Filter Operator [FIL_103] (rows=8116 width=1119)
+                              Filter Operator [FIL_105] (rows=8116 width=1119)
                                 predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00' AND TIMESTAMP'1998-06-16 00:00:00' and d_date_sk is not null)
                                 TableScan [TS_3] (rows=73049 width=1119)
                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                         <-Map 1 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_117]
+                          SHUFFLE [RS_123]
                             PartitionCols:_col0
-                            Select Operator [SEL_116] (rows=287989836 width=135)
+                            Select Operator [SEL_122] (rows=287989836 width=135)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_115] (rows=287989836 width=135)
-                                predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null)
+                              Filter Operator [FIL_121] (rows=287989836 width=135)
+                                predicate:((cs_item_sk BETWEEN DynamicValue(RS_24_item_i_item_sk_min) AND DynamicValue(RS_24_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_24_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null)
                                 TableScan [TS_0] (rows=287989836 width=135)
                                   default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_ext_discount_amt"]
-                                <-Reducer 6 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_114]
-                                    Group By Operator [GBY_113] (rows=1 width=12)
+                                <-Reducer 10 [BROADCAST_EDGE] vectorized
+                                  BROADCAST [RS_120]
+                                    Group By Operator [GBY_119] (rows=1 width=12)
                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                    <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_111]
+                                    <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
+                                      SHUFFLE [RS_118]
+                                        Group By Operator [GBY_117] (rows=1 width=12)
+                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                          Select Operator [SEL_116] (rows=231000 width=1436)
+                                            Output:["_col0"]
+                                            Select Operator [SEL_114] (rows=231000 width=1436)
+                                              Output:["_col0"]
+                                              Filter Operator [FIL_113] (rows=231000 width=1436)
+                                                predicate:((i_manufact_id = 269) and i_item_sk is not null)
+                                                TableScan [TS_20] (rows=462000 width=1436)
+                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_manufact_id"]
+                                <-Reducer 8 [BROADCAST_EDGE] vectorized
+                                  BROADCAST [RS_112]
+                                    Group By Operator [GBY_111] (rows=1 width=12)
+                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                    <-Map 7 [CUSTOM_SIMPLE_EDGE] vectorized
+                                      PARTITION_ONLY_SHUFFLE [RS_110]
                                         Group By Operator [GBY_109] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_106] (rows=8116 width=1119)
+                                          Select Operator [SEL_108] (rows=8116 width=1119)
                                             Output:["_col0"]
-                                             Please refer to the previous Select Operator [SEL_104]
-                    <-Reducer 9 [ONE_TO_ONE_EDGE]
+                                             Please refer to the previous Select Operator [SEL_106]
+                    <-Reducer 6 [ONE_TO_ONE_EDGE]
                       FORWARD [RS_31]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_101] (rows=174233858 width=135)
-                          Conds:RS_133._col0=RS_122._col0(Inner),Output:["_col1","_col2"]
-                        <-Map 12 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_122]
+                        Merge Join Operator [MERGEJOIN_103] (rows=174233858 width=135)
+                          Conds:RS_126._col0=RS_115._col0(Inner),Output:["_col1","_col2"]
+                        <-Map 9 [SIMPLE_EDGE] vectorized
+                          SHUFFLE [RS_115]
                             PartitionCols:_col0
-                            Select Operator [SEL_121] (rows=231000 width=1436)
-                              Output:["_col0"]
-                              Filter Operator [FIL_120] (rows=231000 width=1436)
-                                predicate:((i_manufact_id = 269) and i_item_sk is not null)
-                                TableScan [TS_20] (rows=462000 width=1436)
-                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_manufact_id"]
-                        <-Reducer 8 [ONE_TO_ONE_EDGE] vectorized
-                          FORWARD [RS_133]
+                             Please refer to the previous Select Operator [SEL_114]
+                        <-Reducer 5 [ONE_TO_ONE_EDGE] vectorized
+                          FORWARD [RS_126]
                             PartitionCols:_col0
-                            Select Operator [SEL_132] (rows=158394413 width=135)
+                            Select Operator [SEL_125] (rows=158394413 width=135)
                               Output:["_col0","_col1"]
-                              Group By Operator [GBY_131] (rows=158394413 width=135)
+                              Group By Operator [GBY_124] (rows=158394413 width=135)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0
-                              <-Reducer 7 [SIMPLE_EDGE]
+                              <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_17]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_16] (rows=316788826 width=135)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","count(_col2)"],keys:_col1
-                                    Merge Join Operator [MERGEJOIN_100] (rows=316788826 width=135)
-                                      Conds:RS_130._col0=RS_107._col0(Inner),Output:["_col1","_col2"]
-                                    <-Map 5 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_107]
-                                        PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_104]
-                                    <-Map 11 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_130]
-                                        PartitionCols:_col0
-                                        Select Operator [SEL_129] (rows=287989836 width=135)
-                                          Output:["_col0","_col1","_col2"]
-                                          Filter Operator [FIL_128] (rows=287989836 width=135)
-                                            predicate:((cs_item_sk BETWEEN DynamicValue(RS_24_item_i_item_sk_min) AND DynamicValue(RS_24_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_24_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                            TableScan [TS_6] (rows=287989836 width=135)
-                                              default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_item_sk","cs_ext_discount_amt"]
-                                            <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_119]
-                                                Group By Operator [GBY_118] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 5 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_112]
-                                                    Group By Operator [GBY_110] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_108] (rows=8116 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_104]
-                                            <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_127]
-                                                Group By Operator [GBY_126] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_125]
-                                                    Group By Operator [GBY_124] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_123] (rows=231000 width=1436)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_121]
+                                     Please refer to the previous Merge Join Operator [MERGEJOIN_101]
 


[26/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
index 352f5c7..a547de1 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -65,7 +65,7 @@ class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookService
   virtual void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
   virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0;
   virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0;
-  virtual void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
+  virtual void get_materialization_invalidation_info(Materialization& _return, const CreationMetadata& creation_metadata, const std::string& validTxnList) = 0;
   virtual void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0;
   virtual void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0;
   virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0;
@@ -390,7 +390,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p
   void get_table_objects_by_name_req(GetTablesResult& /* _return */, const GetTablesRequest& /* req */) {
     return;
   }
-  void get_materialization_invalidation_info(std::map<std::string, Materialization> & /* _return */, const std::string& /* dbname */, const std::vector<std::string> & /* tbl_names */) {
+  void get_materialization_invalidation_info(Materialization& /* _return */, const CreationMetadata& /* creation_metadata */, const std::string& /* validTxnList */) {
     return;
   }
   void update_creation_metadata(const std::string& /* catName */, const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) {
@@ -6131,9 +6131,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_req_presult {
 };
 
 typedef struct _ThriftHiveMetastore_get_materialization_invalidation_info_args__isset {
-  _ThriftHiveMetastore_get_materialization_invalidation_info_args__isset() : dbname(false), tbl_names(false) {}
-  bool dbname :1;
-  bool tbl_names :1;
+  _ThriftHiveMetastore_get_materialization_invalidation_info_args__isset() : creation_metadata(false), validTxnList(false) {}
+  bool creation_metadata :1;
+  bool validTxnList :1;
 } _ThriftHiveMetastore_get_materialization_invalidation_info_args__isset;
 
 class ThriftHiveMetastore_get_materialization_invalidation_info_args {
@@ -6141,24 +6141,24 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
 
   ThriftHiveMetastore_get_materialization_invalidation_info_args(const ThriftHiveMetastore_get_materialization_invalidation_info_args&);
   ThriftHiveMetastore_get_materialization_invalidation_info_args& operator=(const ThriftHiveMetastore_get_materialization_invalidation_info_args&);
-  ThriftHiveMetastore_get_materialization_invalidation_info_args() : dbname() {
+  ThriftHiveMetastore_get_materialization_invalidation_info_args() : validTxnList() {
   }
 
   virtual ~ThriftHiveMetastore_get_materialization_invalidation_info_args() throw();
-  std::string dbname;
-  std::vector<std::string>  tbl_names;
+  CreationMetadata creation_metadata;
+  std::string validTxnList;
 
   _ThriftHiveMetastore_get_materialization_invalidation_info_args__isset __isset;
 
-  void __set_dbname(const std::string& val);
+  void __set_creation_metadata(const CreationMetadata& val);
 
-  void __set_tbl_names(const std::vector<std::string> & val);
+  void __set_validTxnList(const std::string& val);
 
   bool operator == (const ThriftHiveMetastore_get_materialization_invalidation_info_args & rhs) const
   {
-    if (!(dbname == rhs.dbname))
+    if (!(creation_metadata == rhs.creation_metadata))
       return false;
-    if (!(tbl_names == rhs.tbl_names))
+    if (!(validTxnList == rhs.validTxnList))
       return false;
     return true;
   }
@@ -6179,8 +6179,8 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_pargs {
 
 
   virtual ~ThriftHiveMetastore_get_materialization_invalidation_info_pargs() throw();
-  const std::string* dbname;
-  const std::vector<std::string> * tbl_names;
+  const CreationMetadata* creation_metadata;
+  const std::string* validTxnList;
 
   uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
 
@@ -6203,14 +6203,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
   }
 
   virtual ~ThriftHiveMetastore_get_materialization_invalidation_info_result() throw();
-  std::map<std::string, Materialization>  success;
+  Materialization success;
   MetaException o1;
   InvalidOperationException o2;
   UnknownDBException o3;
 
   _ThriftHiveMetastore_get_materialization_invalidation_info_result__isset __isset;
 
-  void __set_success(const std::map<std::string, Materialization> & val);
+  void __set_success(const Materialization& val);
 
   void __set_o1(const MetaException& val);
 
@@ -6254,7 +6254,7 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult {
 
 
   virtual ~ThriftHiveMetastore_get_materialization_invalidation_info_presult() throw();
-  std::map<std::string, Materialization> * success;
+  Materialization* success;
   MetaException o1;
   InvalidOperationException o2;
   UnknownDBException o3;
@@ -26460,9 +26460,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public
   void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
   void send_get_table_objects_by_name_req(const GetTablesRequest& req);
   void recv_get_table_objects_by_name_req(GetTablesResult& _return);
-  void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
-  void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
-  void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return);
+  void get_materialization_invalidation_info(Materialization& _return, const CreationMetadata& creation_metadata, const std::string& validTxnList);
+  void send_get_materialization_invalidation_info(const CreationMetadata& creation_metadata, const std::string& validTxnList);
+  void recv_get_materialization_invalidation_info(Materialization& _return);
   void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
   void send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
   void recv_update_creation_metadata();
@@ -27826,13 +27826,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi
     return;
   }
 
-  void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) {
+  void get_materialization_invalidation_info(Materialization& _return, const CreationMetadata& creation_metadata, const std::string& validTxnList) {
     size_t sz = ifaces_.size();
     size_t i = 0;
     for (; i < (sz - 1); ++i) {
-      ifaces_[i]->get_materialization_invalidation_info(_return, dbname, tbl_names);
+      ifaces_[i]->get_materialization_invalidation_info(_return, creation_metadata, validTxnList);
     }
-    ifaces_[i]->get_materialization_invalidation_info(_return, dbname, tbl_names);
+    ifaces_[i]->get_materialization_invalidation_info(_return, creation_metadata, validTxnList);
     return;
   }
 
@@ -29562,9 +29562,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req);
   int32_t send_get_table_objects_by_name_req(const GetTablesRequest& req);
   void recv_get_table_objects_by_name_req(GetTablesResult& _return, const int32_t seqid);
-  void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names);
-  int32_t send_get_materialization_invalidation_info(const std::string& dbname, const std::vector<std::string> & tbl_names);
-  void recv_get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const int32_t seqid);
+  void get_materialization_invalidation_info(Materialization& _return, const CreationMetadata& creation_metadata, const std::string& validTxnList);
+  int32_t send_get_materialization_invalidation_info(const CreationMetadata& creation_metadata, const std::string& validTxnList);
+  void recv_get_materialization_invalidation_info(Materialization& _return, const int32_t seqid);
   void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
   int32_t send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata);
   void recv_update_creation_metadata(const int32_t seqid);

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 789c150..5819b17 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -237,7 +237,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("get_table_objects_by_name_req\n");
   }
 
-  void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) {
+  void get_materialization_invalidation_info(Materialization& _return, const CreationMetadata& creation_metadata, const std::string& validTxnList) {
     // Your implementation goes here
     printf("get_materialization_invalidation_info\n");
   }
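
For context on the signature change above: the old call took (dbname, tbl_names) and returned a map<string, Materialization>, while the new call takes the materialized view's CreationMetadata plus the caller's current valid transaction list and returns a single Materialization. Below is a minimal, illustrative Python sketch of invoking the regenerated client; it assumes the gen-py bindings mirror the gen-cpp change shown here, uses a plain unsecured Thrift endpoint, and every host/port, transaction-list string and field value is a placeholder rather than a value from this patch.

    # Illustrative only: assumes the regenerated gen-py client exposes the same
    # new signature as the gen-cpp header above, and a plain (unsecured) endpoint.
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore
    from hive_metastore.ttypes import CreationMetadata

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()

    # New shape: pass the materialization's creation metadata and the caller's
    # current valid txn list; get back one Materialization for that view.
    cm = CreationMetadata(catName='hive', dbName='default', tblName='mv1',
                          tablesUsed={'default.t1', 'default.t2'},
                          validTxnList='5:9223372036854775807::')  # placeholder
    mat = client.get_materialization_invalidation_info(cm, '7:9223372036854775807::')
    print(mat.sourceTablesUpdateDeleteModified)

    transport.close()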


[18/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 031e72b..1285c08 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -14681,6 +14681,7 @@ class CreationMetadata:
    - tblName
    - tablesUsed
    - validTxnList
+   - materializationTime
   """
 
   thrift_spec = (
@@ -14690,14 +14691,16 @@ class CreationMetadata:
     (3, TType.STRING, 'tblName', None, None, ), # 3
     (4, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 4
     (5, TType.STRING, 'validTxnList', None, None, ), # 5
+    (6, TType.I64, 'materializationTime', None, None, ), # 6
   )
 
-  def __init__(self, catName=None, dbName=None, tblName=None, tablesUsed=None, validTxnList=None,):
+  def __init__(self, catName=None, dbName=None, tblName=None, tablesUsed=None, validTxnList=None, materializationTime=None,):
     self.catName = catName
     self.dbName = dbName
     self.tblName = tblName
     self.tablesUsed = tablesUsed
     self.validTxnList = validTxnList
+    self.materializationTime = materializationTime
 
   def read(self, iprot):
     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
@@ -14738,6 +14741,11 @@ class CreationMetadata:
           self.validTxnList = iprot.readString()
         else:
           iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.I64:
+          self.materializationTime = iprot.readI64()
+        else:
+          iprot.skip(ftype)
       else:
         iprot.skip(ftype)
       iprot.readFieldEnd()
@@ -14771,6 +14779,10 @@ class CreationMetadata:
       oprot.writeFieldBegin('validTxnList', TType.STRING, 5)
       oprot.writeString(self.validTxnList)
       oprot.writeFieldEnd()
+    if self.materializationTime is not None:
+      oprot.writeFieldBegin('materializationTime', TType.I64, 6)
+      oprot.writeI64(self.materializationTime)
+      oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
@@ -14793,6 +14805,7 @@ class CreationMetadata:
     value = (value * 31) ^ hash(self.tblName)
     value = (value * 31) ^ hash(self.tablesUsed)
     value = (value * 31) ^ hash(self.validTxnList)
+    value = (value * 31) ^ hash(self.materializationTime)
     return value
 
   def __repr__(self):
@@ -17613,24 +17626,15 @@ class TableMeta:
 class Materialization:
   """
   Attributes:
-   - tablesUsed
-   - validTxnList
-   - invalidationTime
    - sourceTablesUpdateDeleteModified
   """
 
   thrift_spec = (
     None, # 0
-    (1, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 1
-    (2, TType.STRING, 'validTxnList', None, None, ), # 2
-    (3, TType.I64, 'invalidationTime', None, None, ), # 3
-    (4, TType.BOOL, 'sourceTablesUpdateDeleteModified', None, None, ), # 4
+    (1, TType.BOOL, 'sourceTablesUpdateDeleteModified', None, None, ), # 1
   )
 
-  def __init__(self, tablesUsed=None, validTxnList=None, invalidationTime=None, sourceTablesUpdateDeleteModified=None,):
-    self.tablesUsed = tablesUsed
-    self.validTxnList = validTxnList
-    self.invalidationTime = invalidationTime
+  def __init__(self, sourceTablesUpdateDeleteModified=None,):
     self.sourceTablesUpdateDeleteModified = sourceTablesUpdateDeleteModified
 
   def read(self, iprot):
@@ -17643,26 +17647,6 @@ class Materialization:
       if ftype == TType.STOP:
         break
       if fid == 1:
-        if ftype == TType.SET:
-          self.tablesUsed = set()
-          (_etype763, _size760) = iprot.readSetBegin()
-          for _i764 in xrange(_size760):
-            _elem765 = iprot.readString()
-            self.tablesUsed.add(_elem765)
-          iprot.readSetEnd()
-        else:
-          iprot.skip(ftype)
-      elif fid == 2:
-        if ftype == TType.STRING:
-          self.validTxnList = iprot.readString()
-        else:
-          iprot.skip(ftype)
-      elif fid == 3:
-        if ftype == TType.I64:
-          self.invalidationTime = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
         if ftype == TType.BOOL:
           self.sourceTablesUpdateDeleteModified = iprot.readBool()
         else:
@@ -17677,39 +17661,21 @@ class Materialization:
       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
       return
     oprot.writeStructBegin('Materialization')
-    if self.tablesUsed is not None:
-      oprot.writeFieldBegin('tablesUsed', TType.SET, 1)
-      oprot.writeSetBegin(TType.STRING, len(self.tablesUsed))
-      for iter766 in self.tablesUsed:
-        oprot.writeString(iter766)
-      oprot.writeSetEnd()
-      oprot.writeFieldEnd()
-    if self.validTxnList is not None:
-      oprot.writeFieldBegin('validTxnList', TType.STRING, 2)
-      oprot.writeString(self.validTxnList)
-      oprot.writeFieldEnd()
-    if self.invalidationTime is not None:
-      oprot.writeFieldBegin('invalidationTime', TType.I64, 3)
-      oprot.writeI64(self.invalidationTime)
-      oprot.writeFieldEnd()
     if self.sourceTablesUpdateDeleteModified is not None:
-      oprot.writeFieldBegin('sourceTablesUpdateDeleteModified', TType.BOOL, 4)
+      oprot.writeFieldBegin('sourceTablesUpdateDeleteModified', TType.BOOL, 1)
       oprot.writeBool(self.sourceTablesUpdateDeleteModified)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
     oprot.writeStructEnd()
 
   def validate(self):
-    if self.tablesUsed is None:
-      raise TProtocol.TProtocolException(message='Required field tablesUsed is unset!')
+    if self.sourceTablesUpdateDeleteModified is None:
+      raise TProtocol.TProtocolException(message='Required field sourceTablesUpdateDeleteModified is unset!')
     return
 
 
   def __hash__(self):
     value = 17
-    value = (value * 31) ^ hash(self.tablesUsed)
-    value = (value * 31) ^ hash(self.validTxnList)
-    value = (value * 31) ^ hash(self.invalidationTime)
     value = (value * 31) ^ hash(self.sourceTablesUpdateDeleteModified)
     return value
 
@@ -18586,44 +18552,44 @@ class WMFullResourcePlan:
       elif fid == 2:
         if ftype == TType.LIST:
           self.pools = []
-          (_etype770, _size767) = iprot.readListBegin()
-          for _i771 in xrange(_size767):
-            _elem772 = WMPool()
-            _elem772.read(iprot)
-            self.pools.append(_elem772)
+          (_etype763, _size760) = iprot.readListBegin()
+          for _i764 in xrange(_size760):
+            _elem765 = WMPool()
+            _elem765.read(iprot)
+            self.pools.append(_elem765)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.LIST:
           self.mappings = []
-          (_etype776, _size773) = iprot.readListBegin()
-          for _i777 in xrange(_size773):
-            _elem778 = WMMapping()
-            _elem778.read(iprot)
-            self.mappings.append(_elem778)
+          (_etype769, _size766) = iprot.readListBegin()
+          for _i770 in xrange(_size766):
+            _elem771 = WMMapping()
+            _elem771.read(iprot)
+            self.mappings.append(_elem771)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.LIST:
           self.triggers = []
-          (_etype782, _size779) = iprot.readListBegin()
-          for _i783 in xrange(_size779):
-            _elem784 = WMTrigger()
-            _elem784.read(iprot)
-            self.triggers.append(_elem784)
+          (_etype775, _size772) = iprot.readListBegin()
+          for _i776 in xrange(_size772):
+            _elem777 = WMTrigger()
+            _elem777.read(iprot)
+            self.triggers.append(_elem777)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 5:
         if ftype == TType.LIST:
           self.poolTriggers = []
-          (_etype788, _size785) = iprot.readListBegin()
-          for _i789 in xrange(_size785):
-            _elem790 = WMPoolTrigger()
-            _elem790.read(iprot)
-            self.poolTriggers.append(_elem790)
+          (_etype781, _size778) = iprot.readListBegin()
+          for _i782 in xrange(_size778):
+            _elem783 = WMPoolTrigger()
+            _elem783.read(iprot)
+            self.poolTriggers.append(_elem783)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -18644,29 +18610,29 @@ class WMFullResourcePlan:
     if self.pools is not None:
       oprot.writeFieldBegin('pools', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.pools))
-      for iter791 in self.pools:
-        iter791.write(oprot)
+      for iter784 in self.pools:
+        iter784.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.mappings is not None:
       oprot.writeFieldBegin('mappings', TType.LIST, 3)
       oprot.writeListBegin(TType.STRUCT, len(self.mappings))
-      for iter792 in self.mappings:
-        iter792.write(oprot)
+      for iter785 in self.mappings:
+        iter785.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.triggers is not None:
       oprot.writeFieldBegin('triggers', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.triggers))
-      for iter793 in self.triggers:
-        iter793.write(oprot)
+      for iter786 in self.triggers:
+        iter786.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.poolTriggers is not None:
       oprot.writeFieldBegin('poolTriggers', TType.LIST, 5)
       oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers))
-      for iter794 in self.poolTriggers:
-        iter794.write(oprot)
+      for iter787 in self.poolTriggers:
+        iter787.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19140,11 +19106,11 @@ class WMGetAllResourcePlanResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.resourcePlans = []
-          (_etype798, _size795) = iprot.readListBegin()
-          for _i799 in xrange(_size795):
-            _elem800 = WMResourcePlan()
-            _elem800.read(iprot)
-            self.resourcePlans.append(_elem800)
+          (_etype791, _size788) = iprot.readListBegin()
+          for _i792 in xrange(_size788):
+            _elem793 = WMResourcePlan()
+            _elem793.read(iprot)
+            self.resourcePlans.append(_elem793)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19161,8 +19127,8 @@ class WMGetAllResourcePlanResponse:
     if self.resourcePlans is not None:
       oprot.writeFieldBegin('resourcePlans', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans))
-      for iter801 in self.resourcePlans:
-        iter801.write(oprot)
+      for iter794 in self.resourcePlans:
+        iter794.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -19466,20 +19432,20 @@ class WMValidateResourcePlanResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.errors = []
-          (_etype805, _size802) = iprot.readListBegin()
-          for _i806 in xrange(_size802):
-            _elem807 = iprot.readString()
-            self.errors.append(_elem807)
+          (_etype798, _size795) = iprot.readListBegin()
+          for _i799 in xrange(_size795):
+            _elem800 = iprot.readString()
+            self.errors.append(_elem800)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.warnings = []
-          (_etype811, _size808) = iprot.readListBegin()
-          for _i812 in xrange(_size808):
-            _elem813 = iprot.readString()
-            self.warnings.append(_elem813)
+          (_etype804, _size801) = iprot.readListBegin()
+          for _i805 in xrange(_size801):
+            _elem806 = iprot.readString()
+            self.warnings.append(_elem806)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -19496,15 +19462,15 @@ class WMValidateResourcePlanResponse:
     if self.errors is not None:
       oprot.writeFieldBegin('errors', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.errors))
-      for iter814 in self.errors:
-        oprot.writeString(iter814)
+      for iter807 in self.errors:
+        oprot.writeString(iter807)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.warnings is not None:
       oprot.writeFieldBegin('warnings', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.warnings))
-      for iter815 in self.warnings:
-        oprot.writeString(iter815)
+      for iter808 in self.warnings:
+        oprot.writeString(iter808)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -20081,11 +20047,11 @@ class WMGetTriggersForResourePlanResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.triggers = []
-          (_etype819, _size816) = iprot.readListBegin()
-          for _i820 in xrange(_size816):
-            _elem821 = WMTrigger()
-            _elem821.read(iprot)
-            self.triggers.append(_elem821)
+          (_etype812, _size809) = iprot.readListBegin()
+          for _i813 in xrange(_size809):
+            _elem814 = WMTrigger()
+            _elem814.read(iprot)
+            self.triggers.append(_elem814)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -20102,8 +20068,8 @@ class WMGetTriggersForResourePlanResponse:
     if self.triggers is not None:
       oprot.writeFieldBegin('triggers', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.triggers))
-      for iter822 in self.triggers:
-        iter822.write(oprot)
+      for iter815 in self.triggers:
+        iter815.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -21287,11 +21253,11 @@ class SchemaVersion:
       elif fid == 4:
         if ftype == TType.LIST:
           self.cols = []
-          (_etype826, _size823) = iprot.readListBegin()
-          for _i827 in xrange(_size823):
-            _elem828 = FieldSchema()
-            _elem828.read(iprot)
-            self.cols.append(_elem828)
+          (_etype819, _size816) = iprot.readListBegin()
+          for _i820 in xrange(_size816):
+            _elem821 = FieldSchema()
+            _elem821.read(iprot)
+            self.cols.append(_elem821)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21351,8 +21317,8 @@ class SchemaVersion:
     if self.cols is not None:
       oprot.writeFieldBegin('cols', TType.LIST, 4)
       oprot.writeListBegin(TType.STRUCT, len(self.cols))
-      for iter829 in self.cols:
-        iter829.write(oprot)
+      for iter822 in self.cols:
+        iter822.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.state is not None:
@@ -21607,11 +21573,11 @@ class FindSchemasByColsResp:
       if fid == 1:
         if ftype == TType.LIST:
           self.schemaVersions = []
-          (_etype833, _size830) = iprot.readListBegin()
-          for _i834 in xrange(_size830):
-            _elem835 = SchemaVersionDescriptor()
-            _elem835.read(iprot)
-            self.schemaVersions.append(_elem835)
+          (_etype826, _size823) = iprot.readListBegin()
+          for _i827 in xrange(_size823):
+            _elem828 = SchemaVersionDescriptor()
+            _elem828.read(iprot)
+            self.schemaVersions.append(_elem828)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -21628,8 +21594,8 @@ class FindSchemasByColsResp:
     if self.schemaVersions is not None:
       oprot.writeFieldBegin('schemaVersions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions))
-      for iter836 in self.schemaVersions:
-        iter836.write(oprot)
+      for iter829 in self.schemaVersions:
+        iter829.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 0348ff2..a0fabfe 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -3277,13 +3277,15 @@ class CreationMetadata
   TBLNAME = 3
   TABLESUSED = 4
   VALIDTXNLIST = 5
+  MATERIALIZATIONTIME = 6
 
   FIELDS = {
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}},
-    VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList', :optional => true}
+    VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList', :optional => true},
+    MATERIALIZATIONTIME => {:type => ::Thrift::Types::I64, :name => 'materializationTime', :optional => true}
   }
 
   def struct_fields; FIELDS; end
@@ -3952,22 +3954,16 @@ end
 
 class Materialization
   include ::Thrift::Struct, ::Thrift::Struct_Union
-  TABLESUSED = 1
-  VALIDTXNLIST = 2
-  INVALIDATIONTIME = 3
-  SOURCETABLESUPDATEDELETEMODIFIED = 4
+  SOURCETABLESUPDATEDELETEMODIFIED = 1
 
   FIELDS = {
-    TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}},
-    VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList', :optional => true},
-    INVALIDATIONTIME => {:type => ::Thrift::Types::I64, :name => 'invalidationTime', :optional => true},
-    SOURCETABLESUPDATEDELETEMODIFIED => {:type => ::Thrift::Types::BOOL, :name => 'sourceTablesUpdateDeleteModified', :optional => true}
+    SOURCETABLESUPDATEDELETEMODIFIED => {:type => ::Thrift::Types::BOOL, :name => 'sourceTablesUpdateDeleteModified'}
   }
 
   def struct_fields; FIELDS; end
 
   def validate
-    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablesUsed is unset!') unless @tablesUsed
+    raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field sourceTablesUpdateDeleteModified is unset!') if @sourceTablesUpdateDeleteModified.nil?
   end
 
   ::Thrift::Struct.generate_accessors self

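For orientation only (not part of this commit), the regenerated Java Thrift class mirrors the slimmed-down Ruby struct above: the one required field left is the update/delete flag. A minimal sketch, assuming the regenerated org.apache.hadoop.hive.metastore.api.Materialization is on the classpath:

// Illustrative sketch; class and setter names come from the regenerated Thrift code.
import org.apache.hadoop.hive.metastore.api.Materialization;

public class MaterializationStructSketch {
  public static void main(String[] args) {
    Materialization m = new Materialization();
    // The only required field remaining on the struct after this change.
    m.setSourceTablesUpdateDeleteModified(false);
    // tablesUsed, validTxnList and invalidationTime no longer live here; the
    // metastore now derives staleness from CreationMetadata plus the caller's
    // transaction list (see get_materialization_invalidation_info below).
    System.out.println(m);
  }
}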
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 2bd958e..5ecfbed 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -726,13 +726,13 @@ module ThriftHiveMetastore
       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_objects_by_name_req failed: unknown result')
     end
 
-    def get_materialization_invalidation_info(dbname, tbl_names)
-      send_get_materialization_invalidation_info(dbname, tbl_names)
+    def get_materialization_invalidation_info(creation_metadata, validTxnList)
+      send_get_materialization_invalidation_info(creation_metadata, validTxnList)
       return recv_get_materialization_invalidation_info()
     end
 
-    def send_get_materialization_invalidation_info(dbname, tbl_names)
-      send_message('get_materialization_invalidation_info', Get_materialization_invalidation_info_args, :dbname => dbname, :tbl_names => tbl_names)
+    def send_get_materialization_invalidation_info(creation_metadata, validTxnList)
+      send_message('get_materialization_invalidation_info', Get_materialization_invalidation_info_args, :creation_metadata => creation_metadata, :validTxnList => validTxnList)
     end
 
     def recv_get_materialization_invalidation_info()
@@ -4043,7 +4043,7 @@ module ThriftHiveMetastore
       args = read_args(iprot, Get_materialization_invalidation_info_args)
       result = Get_materialization_invalidation_info_result.new()
       begin
-        result.success = @handler.get_materialization_invalidation_info(args.dbname, args.tbl_names)
+        result.success = @handler.get_materialization_invalidation_info(args.creation_metadata, args.validTxnList)
       rescue ::MetaException => o1
         result.o1 = o1
       rescue ::InvalidOperationException => o2
@@ -7654,12 +7654,12 @@ module ThriftHiveMetastore
 
   class Get_materialization_invalidation_info_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
-    DBNAME = 1
-    TBL_NAMES = 2
+    CREATION_METADATA = 1
+    VALIDTXNLIST = 2
 
     FIELDS = {
-      DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
-      TBL_NAMES => {:type => ::Thrift::Types::LIST, :name => 'tbl_names', :element => {:type => ::Thrift::Types::STRING}}
+      CREATION_METADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creation_metadata', :class => ::CreationMetadata},
+      VALIDTXNLIST => {:type => ::Thrift::Types::STRING, :name => 'validTxnList'}
     }
 
     def struct_fields; FIELDS; end
@@ -7678,7 +7678,7 @@ module ThriftHiveMetastore
     O3 = 3
 
     FIELDS = {
-      SUCCESS => {:type => ::Thrift::Types::MAP, :name => 'success', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRUCT, :class => ::Materialization}},
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::Materialization},
       O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException},
       O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException},
       O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::UnknownDBException}

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 8d88749..e6f7333 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -3009,8 +3009,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
-    public Map<String, Materialization> get_materialization_invalidation_info(final String dbName, final List<String> tableNames) {
-      return MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(dbName, tableNames);
+    public Materialization get_materialization_invalidation_info(final CreationMetadata cm, final String validTxnList) throws MetaException {
+      return getTxnHandler().getMaterializationInvalidationInfo(cm, validTxnList);
     }
 
     @Override
@@ -8670,13 +8670,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public LockResponse get_lock_materialization_rebuild(String dbName, String tableName, long txnId)
         throws TException {
-      return MaterializationsRebuildLockHandler.get().lockResource(dbName, tableName, txnId);
+      return getTxnHandler().lockMaterializationRebuild(dbName, tableName, txnId);
     }
 
     @Override
     public boolean heartbeat_lock_materialization_rebuild(String dbName, String tableName, long txnId)
         throws TException {
-      return MaterializationsRebuildLockHandler.get().refreshLockResource(dbName, tableName, txnId);
+      return getTxnHandler().heartbeatLockMaterializationRebuild(dbName, tableName, txnId);
     }
 
     @Override
@@ -8992,8 +8992,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           false);
       IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
 
-      // Initialize materializations invalidation cache
-      MaterializationsInvalidationCache.get().init(conf, handler);
       TServerSocket serverSocket;
 
       if (useSasl) {

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index bfd7141..acdb73b 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -167,8 +167,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
       // instantiate the metastore server handler directly instead of connecting
       // through the network
       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
-      // Initialize materializations invalidation cache (only for local metastore)
-      MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client);
       isConnected = true;
       snapshotActiveConf();
       return;
@@ -1610,10 +1608,9 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   }
 
   @Override
-  public Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+  public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
       throws MetaException, InvalidOperationException, UnknownDBException, TException {
-    return client.get_materialization_invalidation_info(
-        dbName, filterHook.filterTableNames(getDefaultCatalog(conf), dbName, viewNames));
+    return client.get_materialization_invalidation_info(cm, validTxnList);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index b5d147b..9661beb 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -775,7 +775,7 @@ public interface IMetaStoreClient {
   /**
    * Returns the invalidation information for the materialized views given as input.
    */
-  Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+  Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
       throws MetaException, InvalidOperationException, UnknownDBException, TException;
 
   /**

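For illustration, a hedged sketch of how a caller might use the reworked single-materialization API; the catalog, database, view and source-table names and the transaction-list strings below are placeholders, not values taken from this patch:

// Hypothetical usage of the new client call; placeholders throughout.
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Materialization;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

public class InvalidationInfoSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    // Normally read from the materialized view's stored creation metadata.
    CreationMetadata cm = new CreationMetadata("hive", "default", "mv1",
        Collections.singleton("default.src_table"));
    cm.setValidTxnList("...");                 // snapshot recorded at creation/rebuild time
    String currentValidTxnList = "...";        // snapshot of the querying session
    Materialization m = client.getMaterializationInvalidationInfo(cm, currentValidTxnList);
    System.out.println("update/delete on source tables since creation: "
        + m.isSourceTablesUpdateDeleteModified());
    client.close();
  }
}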
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsCacheCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsCacheCleanerTask.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsCacheCleanerTask.java
deleted file mode 100644
index cc168a9..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsCacheCleanerTask.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * Task responsible for cleaning the transactions that are not useful from the
- * materializations cache.
- */
-public class MaterializationsCacheCleanerTask implements MetastoreTaskThread {
-  private static final Logger LOG = LoggerFactory.getLogger(MaterializationsCacheCleanerTask.class);
-
-  private Configuration conf;
-
-  @Override
-  public long runFrequency(TimeUnit unit) {
-    return MetastoreConf.getTimeVar(conf,
-        MetastoreConf.ConfVars.MATERIALIZATIONS_INVALIDATION_CACHE_CLEAN_FREQUENCY, unit);
-  }
-
-  @Override
-  public void setConf(Configuration configuration) {
-    conf = configuration;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void run() {
-    long removedCnt = MaterializationsInvalidationCache.get().cleanup(System.currentTimeMillis() -
-        MetastoreConf.getTimeVar(conf,
-            MetastoreConf.ConfVars.MATERIALIZATIONS_INVALIDATION_CACHE_EXPIRY_DURATION, TimeUnit.MILLISECONDS));
-    if (removedCnt > 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Number of transaction entries deleted from materializations cache: " + removedCnt);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
deleted file mode 100644
index fc644f0..0000000
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-
-import org.apache.hadoop.conf.Configuration;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Multimap;
-import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-/**
- * This cache keeps information in memory about the table modifications so materialized views
- * can verify their invalidation time, i.e., the moment after materialization on which the
- * first transaction to the tables they used happened. This information is kept in memory
- * to check the invalidation quickly. However, we store enough information in the metastore
- * to bring this cache up if the metastore is restarted or would crashed. This cache lives
- * in the metastore server.
- */
-public final class MaterializationsInvalidationCache {
-
-  private static final Logger LOG = LoggerFactory.getLogger(MaterializationsInvalidationCache.class);
-
-  /* Singleton */
-  private static final MaterializationsInvalidationCache SINGLETON = new MaterializationsInvalidationCache();
-
-  /* If this boolean is true, this class has no functionality. Only for debugging purposes. */
-  private boolean disable;
-
-  /* Key is the database name. Each value is a map from the unique view qualified name to
-   * the materialization invalidation info. This invalidation object contains information
-   * such as the tables used by the materialized view, whether there was any update or
-   * delete in the source tables since the materialized view was created or rebuilt,
-   * or the invalidation time, i.e., first modification of the tables used by materialized
-   * view after the view was created. */
-  private final ConcurrentMap<String, ConcurrentMap<String, Materialization>> materializations =
-      new ConcurrentHashMap<>();
-
-  /*
-   * Key is a qualified table name. The value is a (sorted) tree map (supporting concurrent
-   * modifications) that will keep the modifications for a given table in the order of their
-   * transaction id. This is useful to quickly check the invalidation time for a given
-   * materialization.
-   */
-  private final ConcurrentMap<String, ConcurrentSkipListMap<Long, Long>> tableModifications =
-      new ConcurrentHashMap<>();
-
-  private final ConcurrentMap<String, ConcurrentSkipListSet<Long>> updateDeleteTableModifications =
-      new ConcurrentHashMap<>();
-
-  /* Whether the cache has been initialized or not. */
-  private boolean initialized;
-  /* Configuration for cache. */
-  private Configuration conf;
-  /* Handler to connect to metastore. */
-  private IHMSHandler handler;
-
-  private MaterializationsInvalidationCache() {
-  }
-
-  /**
-   * Get instance of MaterializationsInvalidationCache.
-   *
-   * @return the singleton
-   */
-  public static MaterializationsInvalidationCache get() {
-    return SINGLETON;
-  }
-
-  /**
-   * Initialize the invalidation cache.
-   *
-   * The method is synchronized because we want to avoid initializing the invalidation cache
-   * multiple times in embedded mode. This will not happen when we run the metastore remotely
-   * as the method is called only once.
-   */
-  public synchronized void init(Configuration conf, IHMSHandler handler) {
-    this.conf = conf;
-    this.handler = handler;
-
-    // This will only be true for debugging purposes
-    this.disable = MetastoreConf.getVar(conf,
-        MetastoreConf.ConfVars.MATERIALIZATIONS_INVALIDATION_CACHE_IMPL).equals("DISABLE");
-    if (disable) {
-      // Nothing to do
-      return;
-    }
-
-    if (!initialized) {
-      this.initialized = true;
-      ExecutorService pool = Executors.newCachedThreadPool();
-      pool.submit(new Loader());
-      pool.shutdown();
-    }
-  }
-
-  private class Loader implements Runnable {
-    @Override
-    public void run() {
-      try {
-        RawStore store = handler.getMS();
-        for (String catName : store.getCatalogs()) {
-          for (String dbName : store.getAllDatabases(catName)) {
-            for (Table mv : store.getTableObjectsByName(catName, dbName,
-                store.getTables(catName, dbName, null, TableType.MATERIALIZED_VIEW))) {
-              addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()),
-                  mv.getCreationMetadata().getValidTxnList(), OpType.LOAD);
-            }
-          }
-        }
-        LOG.info("Initialized materializations invalidation cache");
-      } catch (Exception e) {
-        LOG.error("Problem connecting to the metastore when initializing the view registry");
-      }
-    }
-  }
-
-  /**
-   * Adds a newly created materialized view to the cache.
-   *
-   * @param dbName
-   * @param tableName
-   * @param tablesUsed tables used by the materialized view
-   * @param validTxnList
-   */
-  public void createMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
-      String validTxnList) {
-    addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.CREATE);
-  }
-
-  /**
-   * Method to call when materialized view is modified.
-   *
-   * @param dbName
-   * @param tableName
-   * @param tablesUsed tables used by the materialized view
-   * @param validTxnList
-   */
-  public void alterMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
-      String validTxnList) {
-    addMaterializedView(dbName, tableName, tablesUsed, validTxnList, OpType.ALTER);
-  }
-
-  /**
-   * Adds the materialized view to the cache.
-   *
-   * @param dbName
-   * @param tableName
-   * @param tablesUsed tables used by the materialized view
-   * @param validTxnList
-   * @param opType
-   */
-  private void addMaterializedView(String dbName, String tableName, Set<String> tablesUsed,
-      String validTxnList, OpType opType) {
-    if (disable) {
-      // Nothing to do
-      return;
-    }
-    // We are going to create the map for each view in the given database
-    ConcurrentMap<String, Materialization> cq =
-        new ConcurrentHashMap<String, Materialization>();
-    final ConcurrentMap<String, Materialization> prevCq = materializations.putIfAbsent(
-        dbName, cq);
-    if (prevCq != null) {
-      cq = prevCq;
-    }
-    // Start the process to add materialization to the cache
-    // Before loading the materialization in the cache, we need to update some
-    // important information in the registry to account for rewriting invalidation
-    if (validTxnList == null) {
-      // This can happen when the materialized view was created on non-transactional tables
-      return;
-    }
-    if (opType == OpType.CREATE || opType == OpType.ALTER) {
-      // You store the materialized view
-      Materialization materialization = new Materialization(tablesUsed);
-      materialization.setValidTxnList(validTxnList);
-      cq.put(tableName, materialization);
-    } else {
-      ValidTxnWriteIdList txnList = new ValidTxnWriteIdList(validTxnList);
-      for (String qNameTableUsed : tablesUsed) {
-        ValidWriteIdList tableTxnList = txnList.getTableValidWriteIdList(qNameTableUsed);
-        // First we insert a new tree set to keep table modifications, unless it already exists
-        ConcurrentSkipListMap<Long, Long> modificationsTree = new ConcurrentSkipListMap<>();
-        final ConcurrentSkipListMap<Long, Long> prevModificationsTree = tableModifications.putIfAbsent(
-                qNameTableUsed, modificationsTree);
-        if (prevModificationsTree != null) {
-          modificationsTree = prevModificationsTree;
-        }
-        // If we are not creating the MV at this instant, but instead it was created previously
-        // and we are loading it into the cache, we need to go through the transaction entries and
-        // check if the MV is still valid.
-        try {
-          String[] names =  qNameTableUsed.split("\\.");
-          BasicTxnInfo e = handler.getTxnHandler().getFirstCompletedTransactionForTableAfterCommit(
-                  names[0], names[1], tableTxnList);
-          if (!e.isIsnull()) {
-            modificationsTree.put(e.getTxnid(), e.getTime());
-            // We do not need to do anything more for current table, as we detected
-            // a modification event that was in the metastore.
-            continue;
-          }
-        } catch (MetaException ex) {
-          LOG.debug("Materialized view " + Warehouse.getQualifiedName(dbName, tableName) +
-                  " ignored; error loading view into invalidation cache", ex);
-          return;
-        }
-      }
-      // For LOAD, you only add it if it does exist as you might be loading an outdated MV
-      Materialization materialization = new Materialization(tablesUsed);
-      materialization.setValidTxnList(validTxnList);
-      cq.putIfAbsent(tableName, materialization);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Cached materialized view for rewriting in invalidation cache: " +
-          Warehouse.getQualifiedName(dbName, tableName));
-    }
-  }
-
-  /**
-   * This method is called when a table is modified. That way we can keep track of the
-   * invalidation for the MVs that use that table.
-   */
-  public void notifyTableModification(String dbName, String tableName,
-      long txnId, long newModificationTime, boolean isUpdateDelete) {
-    if (disable) {
-      // Nothing to do
-      return;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Notification for table {} in database {} received -> id: {}, time: {}",
-          tableName, dbName, txnId, newModificationTime);
-    }
-    if (isUpdateDelete) {
-      // We update first the update/delete modifications record
-      ConcurrentSkipListSet<Long> modificationsSet = new ConcurrentSkipListSet<>();
-      final ConcurrentSkipListSet<Long> prevModificationsSet =
-          updateDeleteTableModifications.putIfAbsent(Warehouse.getQualifiedName(dbName, tableName),
-              modificationsSet);
-      if (prevModificationsSet != null) {
-        modificationsSet = prevModificationsSet;
-      }
-      modificationsSet.add(txnId);
-    }
-    ConcurrentSkipListMap<Long, Long> modificationsTree = new ConcurrentSkipListMap<>();
-    final ConcurrentSkipListMap<Long, Long> prevModificationsTree =
-        tableModifications.putIfAbsent(Warehouse.getQualifiedName(dbName, tableName), modificationsTree);
-    if (prevModificationsTree != null) {
-      modificationsTree = prevModificationsTree;
-    }
-    modificationsTree.put(txnId, newModificationTime);
-  }
-
-  /**
-   * Removes the materialized view from the cache.
-   *
-   * @param dbName
-   * @param tableName
-   */
-  public void dropMaterializedView(String dbName, String tableName) {
-    if (disable) {
-      // Nothing to do
-      return;
-    }
-    materializations.get(dbName).remove(tableName);
-  }
-
-  /**
-   * Returns the materialized views in the cache for the given database.
-   *
-   * @param dbName the database
-   * @return the collection of materialized views, or the empty collection if none
-   */
-  public Map<String, Materialization> getMaterializationInvalidationInfo(
-      String dbName, List<String> materializationNames) {
-    if (materializations.get(dbName) != null) {
-      ImmutableMap.Builder<String, Materialization> m = ImmutableMap.builder();
-      for (String materializationName : materializationNames) {
-        Materialization materialization =
-            materializations.get(dbName).get(materializationName);
-        if (materialization == null) {
-          LOG.debug("Materialization {} skipped as there is no information "
-              + "in the invalidation cache about it", materializationName);
-          continue;
-        }
-        // We create a deep copy of the materialization, as we need to set the time
-        // and whether any update/delete operation happen on the tables that it uses
-        // since it was created.
-        Materialization materializationCopy = new Materialization(
-            materialization.getTablesUsed());
-        materializationCopy.setValidTxnList(materialization.getValidTxnList());
-        enrichWithInvalidationInfo(materializationCopy);
-        m.put(materializationName, materializationCopy);
-      }
-      Map<String, Materialization> result = m.build();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Retrieved the following materializations from the invalidation cache: {}", result);
-      }
-      return result;
-    }
-    return ImmutableMap.of();
-  }
-
-  private void enrichWithInvalidationInfo(Materialization materialization) {
-    String materializationTxnListString = materialization.getValidTxnList();
-    if (materializationTxnListString == null) {
-      // This can happen when the materialization was created on non-transactional tables
-      materialization.setInvalidationTime(Long.MIN_VALUE);
-      return;
-    }
-
-    // We will obtain the modification time as follows.
-    // First, we obtain the first element after high watermark (if any)
-    // Then, we iterate through the elements from min open txn till high
-    // watermark, updating the modification time after creation if needed
-    ValidTxnWriteIdList materializationTxnList = new ValidTxnWriteIdList(materializationTxnListString);
-    long firstModificationTimeAfterCreation = 0L;
-    boolean containsUpdateDelete = false;
-    for (String qNameTableUsed : materialization.getTablesUsed()) {
-      final ValidWriteIdList tableMaterializationTxnList =
-          materializationTxnList.getTableValidWriteIdList(qNameTableUsed);
-
-      final ConcurrentSkipListMap<Long, Long> usedTableModifications =
-          tableModifications.get(qNameTableUsed);
-      if (usedTableModifications == null) {
-        // This is not necessarily an error, since the table may be empty. To be safe,
-        // instead of including this materialized view, we just log the information and
-        // skip it (if table is really empty, it will not matter for performance anyway).
-        LOG.warn("No information found in invalidation cache for table {}, possible tables are: {}",
-            qNameTableUsed, tableModifications.keySet());
-        materialization.setInvalidationTime(Long.MIN_VALUE);
-        return;
-      }
-      final ConcurrentSkipListSet<Long> usedUDTableModifications =
-          updateDeleteTableModifications.get(qNameTableUsed);
-      final Entry<Long, Long> tn = usedTableModifications.higherEntry(tableMaterializationTxnList.getHighWatermark());
-      if (tn != null) {
-        if (firstModificationTimeAfterCreation == 0L ||
-            tn.getValue() < firstModificationTimeAfterCreation) {
-          firstModificationTimeAfterCreation = tn.getValue();
-        }
-        // Check if there was any update/delete after creation
-        containsUpdateDelete = usedUDTableModifications != null &&
-            !usedUDTableModifications.tailSet(tableMaterializationTxnList.getHighWatermark(), false).isEmpty();
-      }
-      // Min open txn might be null if there were no open transactions
-      // when this transaction was being executed
-      if (tableMaterializationTxnList.getMinOpenWriteId() != null) {
-        // Invalid transaction list is sorted
-        int pos = 0;
-        for (Map.Entry<Long, Long> t : usedTableModifications
-                .subMap(tableMaterializationTxnList.getMinOpenWriteId(), tableMaterializationTxnList.getHighWatermark()).entrySet()) {
-          while (pos < tableMaterializationTxnList.getInvalidWriteIds().length &&
-              tableMaterializationTxnList.getInvalidWriteIds()[pos] != t.getKey()) {
-            pos++;
-          }
-          if (pos >= tableMaterializationTxnList.getInvalidWriteIds().length) {
-            break;
-          }
-          if (firstModificationTimeAfterCreation == 0L ||
-              t.getValue() < firstModificationTimeAfterCreation) {
-            firstModificationTimeAfterCreation = t.getValue();
-          }
-          containsUpdateDelete = containsUpdateDelete ||
-              (usedUDTableModifications != null && usedUDTableModifications.contains(t.getKey()));
-        }
-      }
-    }
-
-    materialization.setInvalidationTime(firstModificationTimeAfterCreation);
-    materialization.setSourceTablesUpdateDeleteModified(containsUpdateDelete);
-  }
-
-  private enum OpType {
-    CREATE,
-    LOAD,
-    ALTER
-  }
-
-  /**
-   * Removes transaction events that are not relevant anymore.
-   * @param minTime events generated before this time (ms) can be deleted from the cache
-   * @return number of events that were deleted from the cache
-   */
-  public long cleanup(long minTime) {
-    // To remove, mv should meet two conditions:
-    // 1) Current time - time of transaction > config parameter, and
-    // 2) Transaction should not be associated with invalidation of a MV
-    if (disable || !initialized) {
-      // Bail out
-      return 0L;
-    }
-    // We execute the cleanup in two steps
-    // First we gather all the transactions that need to be kept
-    final Multimap<String, Long> keepTxnInfos = HashMultimap.create();
-    for (Map.Entry<String, ConcurrentMap<String, Materialization>> e : materializations.entrySet()) {
-      for (Materialization m : e.getValue().values()) {
-        ValidTxnWriteIdList txnList = new ValidTxnWriteIdList(m.getValidTxnList());
-        boolean canBeDeleted = false;
-        String currentTableForInvalidatingTxn = null;
-        long currentInvalidatingTxnId = 0L;
-        long currentInvalidatingTxnTime = 0L;
-        for (String qNameTableUsed : m.getTablesUsed()) {
-          ValidWriteIdList tableTxnList = txnList.getTableValidWriteIdList(qNameTableUsed);
-          final Entry<Long, Long> tn = tableModifications.get(qNameTableUsed)
-              .higherEntry(tableTxnList.getHighWatermark());
-          if (tn != null) {
-            if (currentInvalidatingTxnTime == 0L ||
-                tn.getValue() < currentInvalidatingTxnTime) {
-              // This transaction 1) is the first one examined for this materialization, or
-              // 2) it is the invalidating transaction. Hence we add it to the transactions to keep.
-              // 1.- We remove the previous invalidating transaction from the transactions
-              // to be kept (if needed).
-              if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
-                keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
-              }
-              // 2.- We add this transaction to the transactions that should be kept.
-              canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(tn.getKey());
-              keepTxnInfos.put(qNameTableUsed, tn.getKey());
-              // 3.- We record this transaction as the current invalidating transaction.
-              currentTableForInvalidatingTxn = qNameTableUsed;
-              currentInvalidatingTxnId = tn.getKey();
-              currentInvalidatingTxnTime = tn.getValue();
-            }
-          }
-          if (tableTxnList.getMinOpenWriteId() != null) {
-            // Invalid transaction list is sorted
-            int pos = 0;
-            for (Entry<Long, Long> t : tableModifications.get(qNameTableUsed)
-                .subMap(tableTxnList.getMinOpenWriteId(), tableTxnList.getHighWatermark()).entrySet()) {
-              while (pos < tableTxnList.getInvalidWriteIds().length &&
-                  tableTxnList.getInvalidWriteIds()[pos] != t.getKey()) {
-                pos++;
-              }
-              if (pos >= tableTxnList.getInvalidWriteIds().length) {
-                break;
-              }
-              if (currentInvalidatingTxnTime == 0L ||
-                  t.getValue() < currentInvalidatingTxnTime) {
-                // This transaction 1) is the first one examined for this materialization, or
-                // 2) it is the invalidating transaction. Hence we add it to the transactions to keep.
-                // 1.- We remove the previous invalidating transaction from the transactions
-                // to be kept (if needed).
-                if (canBeDeleted && currentInvalidatingTxnTime < minTime) {
-                  keepTxnInfos.remove(currentTableForInvalidatingTxn, currentInvalidatingTxnId);
-                }
-                // 2.- We add this transaction to the transactions that should be kept.
-                canBeDeleted = !keepTxnInfos.get(qNameTableUsed).contains(t.getKey());
-                keepTxnInfos.put(qNameTableUsed, t.getKey());
-                // 3.- We record this transaction as the current invalidating transaction.
-                currentTableForInvalidatingTxn = qNameTableUsed;
-                currentInvalidatingTxnId = t.getKey();
-                currentInvalidatingTxnTime = t.getValue();
-              }
-            }
-          }
-        }
-      }
-    }
-    // Second, we remove the transactions
-    long removed = 0L;
-    for (Entry<String, ConcurrentSkipListMap<Long, Long>> e : tableModifications.entrySet()) {
-      Collection<Long> c = keepTxnInfos.get(e.getKey());
-      ConcurrentSkipListSet<Long> updateDeleteForTable = updateDeleteTableModifications.get(e.getKey());
-      for (Iterator<Entry<Long, Long>> it = e.getValue().entrySet().iterator(); it.hasNext();) {
-        Entry<Long, Long> v = it.next();
-        // We need to check again the time because some of the transactions might not be explored
-        // above, e.g., transactions above the highest transaction mark for all the materialized
-        // views.
-        if (v.getValue() < minTime && (c.isEmpty() || !c.contains(v.getKey()))) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Transaction removed from cache for table {} -> id: {}, time: {}",
-                e.getKey(), v.getKey(), v.getValue());
-          }
-          if (updateDeleteForTable != null) {
-            updateDeleteForTable.remove(v.getKey());
-          }
-          it.remove();
-          removed++;
-        }
-      }
-    }
-    return removed;
-  }
-
-  /**
-   * Checks whether the given materialization exists in the invalidation cache.
-   * @param dbName the database name for the materialization
-   * @param tblName the table name for the materialization
-   * @return true if we have information about the materialization in the cache,
-   * false otherwise
-   */
-  public boolean containsMaterialization(String dbName, String tblName) {
-    if (disable || dbName == null || tblName == null) {
-      return false;
-    }
-    ConcurrentMap<String, Materialization> dbMaterializations = materializations.get(dbName);
-    if (dbMaterializations == null || dbMaterializations.get(tblName) == null) {
-      // This is a table
-      return false;
-    }
-    return true;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockCleanerTask.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockCleanerTask.java
index 8ca9ede..9ce7d6d 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockCleanerTask.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsRebuildLockCleanerTask.java
@@ -18,7 +18,10 @@
 package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -32,6 +35,7 @@ public class MaterializationsRebuildLockCleanerTask implements MetastoreTaskThre
   private static final Logger LOG = LoggerFactory.getLogger(MaterializationsRebuildLockCleanerTask.class);
 
   private Configuration conf;
+  private TxnStore txnHandler;
 
   @Override
   public long runFrequency(TimeUnit unit) {
@@ -41,6 +45,7 @@ public class MaterializationsRebuildLockCleanerTask implements MetastoreTaskThre
   @Override
   public void setConf(Configuration configuration) {
     conf = configuration;
+    txnHandler = TxnUtils.getTxnStore(conf);
   }
 
   @Override
@@ -50,11 +55,26 @@ public class MaterializationsRebuildLockCleanerTask implements MetastoreTaskThre
 
   @Override
   public void run() {
-    long removedCnt = MaterializationsRebuildLockHandler.get().cleanupResourceLocks(
-        MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS));
-    if (removedCnt > 0) {
-      if (LOG.isDebugEnabled()) {
-        LOG.info("Number of materialization locks deleted: " + removedCnt);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Cleaning up materialization rebuild locks");
+    }
+
+    TxnStore.MutexAPI.LockHandle handle = null;
+    try {
+      handle = txnHandler.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.MaterializationRebuild.name());
+      ValidTxnList validTxnList = TxnUtils.createValidReadTxnList(txnHandler.getOpenTxns(), 0);
+      long removedCnt = txnHandler.cleanupMaterializationRebuildLocks(validTxnList,
+          MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS));
+      if (removedCnt > 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Number of materialization locks deleted: " + removedCnt);
+        }
+      }
+    } catch(Throwable t) {
+      LOG.error("Serious error in {}", Thread.currentThread().getName(), ": {}" + t.getMessage(), t);
+    } finally {
+      if(handle != null) {
+        handle.releaseLocks();
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 8721022..bdcbf41 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1335,13 +1335,6 @@ public class ObjectStore implements RawStore, Configurable {
     } finally {
       if (!commited) {
         rollbackTransaction();
-      } else {
-        if (MetaStoreUtils.isMaterializedViewTable(tbl)) {
-          // Add to the invalidation cache
-          MaterializationsInvalidationCache.get().createMaterializedView(
-              tbl.getDbName(), tbl.getTableName(), tbl.getCreationMetadata().getTablesUsed(),
-              tbl.getCreationMetadata().getValidTxnList());
-        }
       }
     }
   }
@@ -1439,10 +1432,6 @@ public class ObjectStore implements RawStore, Configurable {
     } finally {
       if (!success) {
         rollbackTransaction();
-      } else {
-        if (materializedView) {
-          MaterializationsInvalidationCache.get().dropMaterializedView(dbName, tableName);
-        }
       }
     }
     return success;
@@ -2285,13 +2274,14 @@ public class ObjectStore implements RawStore, Configurable {
     if (m == null) {
       return null;
     }
+    assert !m.isSetMaterializationTime();
     Set<MTable> tablesUsed = new HashSet<>();
     for (String fullyQualifiedName : m.getTablesUsed()) {
       String[] names =  fullyQualifiedName.split("\\.");
       tablesUsed.add(getMTable(m.getCatName(), names[0], names[1], false).mtbl);
     }
     return new MCreationMetadata(m.getCatName(), m.getDbName(), m.getTblName(),
-        tablesUsed, m.getValidTxnList());
+        tablesUsed, m.getValidTxnList(), System.currentTimeMillis());
   }
 
   private CreationMetadata convertToCreationMetadata(
@@ -2307,6 +2297,7 @@ public class ObjectStore implements RawStore, Configurable {
     }
     CreationMetadata r = new CreationMetadata(s.getCatalogName(),
         s.getDbName(), s.getTblName(), tablesUsed);
+    r.setMaterializationTime(s.getMaterializationTime());
     if (s.getTxnList() != null) {
       r.setValidTxnList(s.getTxnList());
     }
@@ -4210,16 +4201,13 @@ public class ObjectStore implements RawStore, Configurable {
       MCreationMetadata newMcm = convertToMCreationMetadata(cm);
       MCreationMetadata mcm = getCreationMetadata(catName, dbname, tablename);
       mcm.setTables(newMcm.getTables());
+      mcm.setMaterializationTime(newMcm.getMaterializationTime());
       mcm.setTxnList(newMcm.getTxnList());
       // commit the changes
       success = commitTransaction();
     } finally {
       if (!success) {
         rollbackTransaction();
-      } else {
-        // Add to the invalidation cache if the creation signature has changed
-        MaterializationsInvalidationCache.get().alterMaterializedView(
-            dbname, tablename, cm.getTablesUsed(), cm.getValidTxnList());
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 74a301f..c2bbba5 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -21,7 +21,6 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader;
 import org.apache.hadoop.hive.metastore.HiveAlterHandler;
-import org.apache.hadoop.hive.metastore.MaterializationsCacheCleanerTask;
 import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockCleanerTask;
 import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
 import org.apache.hadoop.hive.metastore.RuntimeStatsCleanerTask;
@@ -762,8 +761,6 @@ public class MetastoreConf {
     TASK_THREADS_ALWAYS("metastore.task.threads.always", "metastore.task.threads.always",
         EventCleanerTask.class.getName() + "," + RuntimeStatsCleanerTask.class.getName() + "," +
         "org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask" + "," +
-        MaterializationsCacheCleanerTask.class.getName() + "," +
-            MaterializationsRebuildLockCleanerTask.class.getName() + "," + RuntimeStatsCleanerTask.class.getName() + "," +
             "org.apache.hadoop.hive.metastore.HiveProtoEventsCleanerTask",
         "Comma separated list of tasks that will be started in separate threads.  These will " +
             "always be started, regardless of whether the metastore is running in embedded mode " +
@@ -772,7 +769,8 @@ public class MetastoreConf {
         AcidHouseKeeperService.class.getName() + "," +
             AcidOpenTxnsCounterService.class.getName() + "," +
             AcidCompactionHistoryService.class.getName() + "," +
-            AcidWriteSetService.class.getName(),
+            AcidWriteSetService.class.getName() + "," +
+            MaterializationsRebuildLockCleanerTask.class.getName(),
         "Command separated list of tasks that will be started in separate threads.  These will be" +
             " started only when the metastore is running as a separate service.  They must " +
             "implement " + MetastoreTaskThread.class.getName()),

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java
index 66b5d48..2d65126 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java
@@ -22,8 +22,8 @@ import java.util.Set;
 /**
  * Represents the creation metadata of a materialization.
  * It includes the database and table name for the materialization,
- * the set of tables that it uses, and the valid transaction list
- * when it was created.
+ * the set of tables that it uses, the valid transaction list
+ * when it was created, and the creation/rebuild time.
  */
 public class MCreationMetadata {
 
@@ -32,17 +32,19 @@ public class MCreationMetadata {
   private String tblName;
   private Set<MTable> tables;
   private String txnList;
+  private long materializationTime;
 
   public MCreationMetadata() {
   }
 
   public MCreationMetadata(String catName, String dbName, String tblName,
-      Set<MTable> tables, String txnList) {
+      Set<MTable> tables, String txnList, long materializationTime) {
     this.catalogName = catName;
     this.dbName = dbName;
     this.tblName = tblName;
     this.tables = tables;
     this.txnList = txnList;
+    this.materializationTime = materializationTime;
   }
 
   public Set<MTable> getTables() {
@@ -84,4 +86,12 @@ public class MCreationMetadata {
   public void setTblName(String tblName) {
     this.tblName = tblName;
   }
+
+  public long getMaterializationTime() {
+    return materializationTime;
+  }
+
+  public void setMaterializationTime(long materializationTime) {
+    this.materializationTime = materializationTime;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index f8c2ca2..2bae133 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -94,9 +94,9 @@ public final class TxnDbUtil {
           "  CTC_DATABASE varchar(128) NOT NULL," +
           "  CTC_TABLE varchar(128)," +
           "  CTC_PARTITION varchar(767)," +
-          "  CTC_ID bigint GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1) NOT NULL," +
           "  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," +
-          "  CTC_WRITEID bigint)");
+          "  CTC_WRITEID bigint," +
+          "  CTC_UPDATE_DELETE char(1) NOT NULL)");
       stmt.execute("CREATE TABLE NEXT_TXN_ID (" + "  NTXN_NEXT bigint NOT NULL)");
       stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
 
@@ -194,6 +194,14 @@ public final class TxnDbUtil {
           " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
       );
 
+      stmt.execute("CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (" +
+          "  MRL_TXN_ID BIGINT NOT NULL, " +
+          "  MRL_DB_NAME VARCHAR(128) NOT NULL, " +
+          "  MRL_TBL_NAME VARCHAR(256) NOT NULL, " +
+          "  MRL_LAST_HEARTBEAT BIGINT NOT NULL, " +
+          "  PRIMARY KEY(MRL_TXN_ID))"
+      );
+
       try {
         stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
 
@@ -336,6 +344,7 @@ public final class TxnDbUtil {
         success &= dropTable(stmt, "AUX_TABLE", retryCount);
         success &= dropTable(stmt, "WRITE_SET", retryCount);
         success &= dropTable(stmt, "REPL_TXN_MAP", retryCount);
+        success &= dropTable(stmt, "MATERIALIZATION_REBUILD_LOCKS", retryCount);
         /*
          * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE as its used by other
          * table which are not txn related to generate primary key. So if these tables are dropped

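Putting the pieces above together, the TxnStore-backed replacement for the old in-memory rebuild lock handler roughly follows the lifecycle below. This is a hedged sketch assembled from the methods touched in this patch; the transaction id, database and table names, and the timeout are placeholders:

// Illustrative lifecycle of the new materialization rebuild locks (placeholders throughout).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.ValidTxnList;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class RebuildLockSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = MetastoreConf.newMetastoreConf();
    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
    long rebuildTxnId = 42L;                  // placeholder rebuild transaction id

    // 1. Acquire the rebuild lock so only one rebuild per view runs at a time.
    LockResponse lock = txnHandler.lockMaterializationRebuild("default", "mv1", rebuildTxnId);

    // 2. Keep the lock alive while the rebuild transaction is running.
    boolean stillHeld = txnHandler.heartbeatLockMaterializationRebuild("default", "mv1", rebuildTxnId);

    // 3. MaterializationsRebuildLockCleanerTask periodically drops expired locks.
    ValidTxnList validTxnList = TxnUtils.createValidReadTxnList(txnHandler.getOpenTxns(), 0);
    long removed = txnHandler.cleanupMaterializationRebuildLocks(validTxnList, 60000L);

    System.out.println(lock.getState() + ", heartbeat=" + stillHeld + ", expired removed=" + removed);
  }
}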

[45/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 7e13e19,29e787b..33d2be7
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@@ -15828,14 -15496,14 +15828,14 @@@ class ThriftHiveMetastore_get_databases
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size855 = 0;
-             $_etype858 = 0;
-             $xfer += $input->readListBegin($_etype858, $_size855);
-             for ($_i859 = 0; $_i859 < $_size855; ++$_i859)
 -            $_size833 = 0;
 -            $_etype836 = 0;
 -            $xfer += $input->readListBegin($_etype836, $_size833);
 -            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
++            $_size847 = 0;
++            $_etype850 = 0;
++            $xfer += $input->readListBegin($_etype850, $_size847);
++            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
              {
-               $elem860 = null;
-               $xfer += $input->readString($elem860);
-               $this->success []= $elem860;
 -              $elem838 = null;
 -              $xfer += $input->readString($elem838);
 -              $this->success []= $elem838;
++              $elem852 = null;
++              $xfer += $input->readString($elem852);
++              $this->success []= $elem852;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -15871,9 -15539,9 +15871,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter861)
 -          foreach ($this->success as $iter839)
++          foreach ($this->success as $iter853)
            {
-             $xfer += $output->writeString($iter861);
 -            $xfer += $output->writeString($iter839);
++            $xfer += $output->writeString($iter853);
            }
          }
          $output->writeListEnd();
@@@ -16004,14 -15672,14 +16004,14 @@@ class ThriftHiveMetastore_get_all_datab
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size862 = 0;
-             $_etype865 = 0;
-             $xfer += $input->readListBegin($_etype865, $_size862);
-             for ($_i866 = 0; $_i866 < $_size862; ++$_i866)
 -            $_size840 = 0;
 -            $_etype843 = 0;
 -            $xfer += $input->readListBegin($_etype843, $_size840);
 -            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
++            $_size854 = 0;
++            $_etype857 = 0;
++            $xfer += $input->readListBegin($_etype857, $_size854);
++            for ($_i858 = 0; $_i858 < $_size854; ++$_i858)
              {
-               $elem867 = null;
-               $xfer += $input->readString($elem867);
-               $this->success []= $elem867;
 -              $elem845 = null;
 -              $xfer += $input->readString($elem845);
 -              $this->success []= $elem845;
++              $elem859 = null;
++              $xfer += $input->readString($elem859);
++              $this->success []= $elem859;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -16047,9 -15715,9 +16047,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter868)
 -          foreach ($this->success as $iter846)
++          foreach ($this->success as $iter860)
            {
-             $xfer += $output->writeString($iter868);
 -            $xfer += $output->writeString($iter846);
++            $xfer += $output->writeString($iter860);
            }
          }
          $output->writeListEnd();
@@@ -17050,18 -16718,18 +17050,18 @@@ class ThriftHiveMetastore_get_type_all_
          case 0:
            if ($ftype == TType::MAP) {
              $this->success = array();
-             $_size869 = 0;
-             $_ktype870 = 0;
-             $_vtype871 = 0;
-             $xfer += $input->readMapBegin($_ktype870, $_vtype871, $_size869);
-             for ($_i873 = 0; $_i873 < $_size869; ++$_i873)
 -            $_size847 = 0;
 -            $_ktype848 = 0;
 -            $_vtype849 = 0;
 -            $xfer += $input->readMapBegin($_ktype848, $_vtype849, $_size847);
 -            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
++            $_size861 = 0;
++            $_ktype862 = 0;
++            $_vtype863 = 0;
++            $xfer += $input->readMapBegin($_ktype862, $_vtype863, $_size861);
++            for ($_i865 = 0; $_i865 < $_size861; ++$_i865)
              {
-               $key874 = '';
-               $val875 = new \metastore\Type();
-               $xfer += $input->readString($key874);
-               $val875 = new \metastore\Type();
-               $xfer += $val875->read($input);
-               $this->success[$key874] = $val875;
 -              $key852 = '';
 -              $val853 = new \metastore\Type();
 -              $xfer += $input->readString($key852);
 -              $val853 = new \metastore\Type();
 -              $xfer += $val853->read($input);
 -              $this->success[$key852] = $val853;
++              $key866 = '';
++              $val867 = new \metastore\Type();
++              $xfer += $input->readString($key866);
++              $val867 = new \metastore\Type();
++              $xfer += $val867->read($input);
++              $this->success[$key866] = $val867;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -17097,10 -16765,10 +17097,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $kiter876 => $viter877)
 -          foreach ($this->success as $kiter854 => $viter855)
++          foreach ($this->success as $kiter868 => $viter869)
            {
-             $xfer += $output->writeString($kiter876);
-             $xfer += $viter877->write($output);
 -            $xfer += $output->writeString($kiter854);
 -            $xfer += $viter855->write($output);
++            $xfer += $output->writeString($kiter868);
++            $xfer += $viter869->write($output);
            }
          }
          $output->writeMapEnd();
@@@ -17304,15 -16972,15 +17304,15 @@@ class ThriftHiveMetastore_get_fields_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size878 = 0;
-             $_etype881 = 0;
-             $xfer += $input->readListBegin($_etype881, $_size878);
-             for ($_i882 = 0; $_i882 < $_size878; ++$_i882)
 -            $_size856 = 0;
 -            $_etype859 = 0;
 -            $xfer += $input->readListBegin($_etype859, $_size856);
 -            for ($_i860 = 0; $_i860 < $_size856; ++$_i860)
++            $_size870 = 0;
++            $_etype873 = 0;
++            $xfer += $input->readListBegin($_etype873, $_size870);
++            for ($_i874 = 0; $_i874 < $_size870; ++$_i874)
              {
-               $elem883 = null;
-               $elem883 = new \metastore\FieldSchema();
-               $xfer += $elem883->read($input);
-               $this->success []= $elem883;
 -              $elem861 = null;
 -              $elem861 = new \metastore\FieldSchema();
 -              $xfer += $elem861->read($input);
 -              $this->success []= $elem861;
++              $elem875 = null;
++              $elem875 = new \metastore\FieldSchema();
++              $xfer += $elem875->read($input);
++              $this->success []= $elem875;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -17364,9 -17032,9 +17364,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter884)
 -          foreach ($this->success as $iter862)
++          foreach ($this->success as $iter876)
            {
-             $xfer += $iter884->write($output);
 -            $xfer += $iter862->write($output);
++            $xfer += $iter876->write($output);
            }
          }
          $output->writeListEnd();
@@@ -17608,15 -17276,15 +17608,15 @@@ class ThriftHiveMetastore_get_fields_wi
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size885 = 0;
-             $_etype888 = 0;
-             $xfer += $input->readListBegin($_etype888, $_size885);
-             for ($_i889 = 0; $_i889 < $_size885; ++$_i889)
 -            $_size863 = 0;
 -            $_etype866 = 0;
 -            $xfer += $input->readListBegin($_etype866, $_size863);
 -            for ($_i867 = 0; $_i867 < $_size863; ++$_i867)
++            $_size877 = 0;
++            $_etype880 = 0;
++            $xfer += $input->readListBegin($_etype880, $_size877);
++            for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
              {
-               $elem890 = null;
-               $elem890 = new \metastore\FieldSchema();
-               $xfer += $elem890->read($input);
-               $this->success []= $elem890;
 -              $elem868 = null;
 -              $elem868 = new \metastore\FieldSchema();
 -              $xfer += $elem868->read($input);
 -              $this->success []= $elem868;
++              $elem882 = null;
++              $elem882 = new \metastore\FieldSchema();
++              $xfer += $elem882->read($input);
++              $this->success []= $elem882;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -17668,9 -17336,9 +17668,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter891)
 -          foreach ($this->success as $iter869)
++          foreach ($this->success as $iter883)
            {
-             $xfer += $iter891->write($output);
 -            $xfer += $iter869->write($output);
++            $xfer += $iter883->write($output);
            }
          }
          $output->writeListEnd();
@@@ -17884,15 -17552,15 +17884,15 @@@ class ThriftHiveMetastore_get_schema_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size892 = 0;
-             $_etype895 = 0;
-             $xfer += $input->readListBegin($_etype895, $_size892);
-             for ($_i896 = 0; $_i896 < $_size892; ++$_i896)
 -            $_size870 = 0;
 -            $_etype873 = 0;
 -            $xfer += $input->readListBegin($_etype873, $_size870);
 -            for ($_i874 = 0; $_i874 < $_size870; ++$_i874)
++            $_size884 = 0;
++            $_etype887 = 0;
++            $xfer += $input->readListBegin($_etype887, $_size884);
++            for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
              {
-               $elem897 = null;
-               $elem897 = new \metastore\FieldSchema();
-               $xfer += $elem897->read($input);
-               $this->success []= $elem897;
 -              $elem875 = null;
 -              $elem875 = new \metastore\FieldSchema();
 -              $xfer += $elem875->read($input);
 -              $this->success []= $elem875;
++              $elem889 = null;
++              $elem889 = new \metastore\FieldSchema();
++              $xfer += $elem889->read($input);
++              $this->success []= $elem889;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -17944,9 -17612,9 +17944,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter898)
 -          foreach ($this->success as $iter876)
++          foreach ($this->success as $iter890)
            {
-             $xfer += $iter898->write($output);
 -            $xfer += $iter876->write($output);
++            $xfer += $iter890->write($output);
            }
          }
          $output->writeListEnd();
@@@ -18188,15 -17856,15 +18188,15 @@@ class ThriftHiveMetastore_get_schema_wi
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size899 = 0;
-             $_etype902 = 0;
-             $xfer += $input->readListBegin($_etype902, $_size899);
-             for ($_i903 = 0; $_i903 < $_size899; ++$_i903)
 -            $_size877 = 0;
 -            $_etype880 = 0;
 -            $xfer += $input->readListBegin($_etype880, $_size877);
 -            for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
++            $_size891 = 0;
++            $_etype894 = 0;
++            $xfer += $input->readListBegin($_etype894, $_size891);
++            for ($_i895 = 0; $_i895 < $_size891; ++$_i895)
              {
-               $elem904 = null;
-               $elem904 = new \metastore\FieldSchema();
-               $xfer += $elem904->read($input);
-               $this->success []= $elem904;
 -              $elem882 = null;
 -              $elem882 = new \metastore\FieldSchema();
 -              $xfer += $elem882->read($input);
 -              $this->success []= $elem882;
++              $elem896 = null;
++              $elem896 = new \metastore\FieldSchema();
++              $xfer += $elem896->read($input);
++              $this->success []= $elem896;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -18248,9 -17916,9 +18248,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter905)
 -          foreach ($this->success as $iter883)
++          foreach ($this->success as $iter897)
            {
-             $xfer += $iter905->write($output);
 -            $xfer += $iter883->write($output);
++            $xfer += $iter897->write($output);
            }
          }
          $output->writeListEnd();
@@@ -18922,15 -18590,15 +18922,15 @@@ class ThriftHiveMetastore_create_table_
          case 2:
            if ($ftype == TType::LST) {
              $this->primaryKeys = array();
-             $_size906 = 0;
-             $_etype909 = 0;
-             $xfer += $input->readListBegin($_etype909, $_size906);
-             for ($_i910 = 0; $_i910 < $_size906; ++$_i910)
 -            $_size884 = 0;
 -            $_etype887 = 0;
 -            $xfer += $input->readListBegin($_etype887, $_size884);
 -            for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
++            $_size898 = 0;
++            $_etype901 = 0;
++            $xfer += $input->readListBegin($_etype901, $_size898);
++            for ($_i902 = 0; $_i902 < $_size898; ++$_i902)
              {
-               $elem911 = null;
-               $elem911 = new \metastore\SQLPrimaryKey();
-               $xfer += $elem911->read($input);
-               $this->primaryKeys []= $elem911;
 -              $elem889 = null;
 -              $elem889 = new \metastore\SQLPrimaryKey();
 -              $xfer += $elem889->read($input);
 -              $this->primaryKeys []= $elem889;
++              $elem903 = null;
++              $elem903 = new \metastore\SQLPrimaryKey();
++              $xfer += $elem903->read($input);
++              $this->primaryKeys []= $elem903;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -18940,15 -18608,15 +18940,15 @@@
          case 3:
            if ($ftype == TType::LST) {
              $this->foreignKeys = array();
-             $_size912 = 0;
-             $_etype915 = 0;
-             $xfer += $input->readListBegin($_etype915, $_size912);
-             for ($_i916 = 0; $_i916 < $_size912; ++$_i916)
 -            $_size890 = 0;
 -            $_etype893 = 0;
 -            $xfer += $input->readListBegin($_etype893, $_size890);
 -            for ($_i894 = 0; $_i894 < $_size890; ++$_i894)
++            $_size904 = 0;
++            $_etype907 = 0;
++            $xfer += $input->readListBegin($_etype907, $_size904);
++            for ($_i908 = 0; $_i908 < $_size904; ++$_i908)
              {
-               $elem917 = null;
-               $elem917 = new \metastore\SQLForeignKey();
-               $xfer += $elem917->read($input);
-               $this->foreignKeys []= $elem917;
 -              $elem895 = null;
 -              $elem895 = new \metastore\SQLForeignKey();
 -              $xfer += $elem895->read($input);
 -              $this->foreignKeys []= $elem895;
++              $elem909 = null;
++              $elem909 = new \metastore\SQLForeignKey();
++              $xfer += $elem909->read($input);
++              $this->foreignKeys []= $elem909;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -18958,15 -18626,15 +18958,15 @@@
          case 4:
            if ($ftype == TType::LST) {
              $this->uniqueConstraints = array();
-             $_size918 = 0;
-             $_etype921 = 0;
-             $xfer += $input->readListBegin($_etype921, $_size918);
-             for ($_i922 = 0; $_i922 < $_size918; ++$_i922)
 -            $_size896 = 0;
 -            $_etype899 = 0;
 -            $xfer += $input->readListBegin($_etype899, $_size896);
 -            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
++            $_size910 = 0;
++            $_etype913 = 0;
++            $xfer += $input->readListBegin($_etype913, $_size910);
++            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
              {
-               $elem923 = null;
-               $elem923 = new \metastore\SQLUniqueConstraint();
-               $xfer += $elem923->read($input);
-               $this->uniqueConstraints []= $elem923;
 -              $elem901 = null;
 -              $elem901 = new \metastore\SQLUniqueConstraint();
 -              $xfer += $elem901->read($input);
 -              $this->uniqueConstraints []= $elem901;
++              $elem915 = null;
++              $elem915 = new \metastore\SQLUniqueConstraint();
++              $xfer += $elem915->read($input);
++              $this->uniqueConstraints []= $elem915;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -18976,15 -18644,15 +18976,15 @@@
          case 5:
            if ($ftype == TType::LST) {
              $this->notNullConstraints = array();
-             $_size924 = 0;
-             $_etype927 = 0;
-             $xfer += $input->readListBegin($_etype927, $_size924);
-             for ($_i928 = 0; $_i928 < $_size924; ++$_i928)
 -            $_size902 = 0;
 -            $_etype905 = 0;
 -            $xfer += $input->readListBegin($_etype905, $_size902);
 -            for ($_i906 = 0; $_i906 < $_size902; ++$_i906)
++            $_size916 = 0;
++            $_etype919 = 0;
++            $xfer += $input->readListBegin($_etype919, $_size916);
++            for ($_i920 = 0; $_i920 < $_size916; ++$_i920)
              {
-               $elem929 = null;
-               $elem929 = new \metastore\SQLNotNullConstraint();
-               $xfer += $elem929->read($input);
-               $this->notNullConstraints []= $elem929;
 -              $elem907 = null;
 -              $elem907 = new \metastore\SQLNotNullConstraint();
 -              $xfer += $elem907->read($input);
 -              $this->notNullConstraints []= $elem907;
++              $elem921 = null;
++              $elem921 = new \metastore\SQLNotNullConstraint();
++              $xfer += $elem921->read($input);
++              $this->notNullConstraints []= $elem921;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -18994,15 -18662,15 +18994,15 @@@
          case 6:
            if ($ftype == TType::LST) {
              $this->defaultConstraints = array();
-             $_size930 = 0;
-             $_etype933 = 0;
-             $xfer += $input->readListBegin($_etype933, $_size930);
-             for ($_i934 = 0; $_i934 < $_size930; ++$_i934)
 -            $_size908 = 0;
 -            $_etype911 = 0;
 -            $xfer += $input->readListBegin($_etype911, $_size908);
 -            for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
++            $_size922 = 0;
++            $_etype925 = 0;
++            $xfer += $input->readListBegin($_etype925, $_size922);
++            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
              {
-               $elem935 = null;
-               $elem935 = new \metastore\SQLDefaultConstraint();
-               $xfer += $elem935->read($input);
-               $this->defaultConstraints []= $elem935;
 -              $elem913 = null;
 -              $elem913 = new \metastore\SQLDefaultConstraint();
 -              $xfer += $elem913->read($input);
 -              $this->defaultConstraints []= $elem913;
++              $elem927 = null;
++              $elem927 = new \metastore\SQLDefaultConstraint();
++              $xfer += $elem927->read($input);
++              $this->defaultConstraints []= $elem927;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19012,15 -18680,15 +19012,15 @@@
          case 7:
            if ($ftype == TType::LST) {
              $this->checkConstraints = array();
-             $_size936 = 0;
-             $_etype939 = 0;
-             $xfer += $input->readListBegin($_etype939, $_size936);
-             for ($_i940 = 0; $_i940 < $_size936; ++$_i940)
 -            $_size914 = 0;
 -            $_etype917 = 0;
 -            $xfer += $input->readListBegin($_etype917, $_size914);
 -            for ($_i918 = 0; $_i918 < $_size914; ++$_i918)
++            $_size928 = 0;
++            $_etype931 = 0;
++            $xfer += $input->readListBegin($_etype931, $_size928);
++            for ($_i932 = 0; $_i932 < $_size928; ++$_i932)
              {
-               $elem941 = null;
-               $elem941 = new \metastore\SQLCheckConstraint();
-               $xfer += $elem941->read($input);
-               $this->checkConstraints []= $elem941;
 -              $elem919 = null;
 -              $elem919 = new \metastore\SQLCheckConstraint();
 -              $xfer += $elem919->read($input);
 -              $this->checkConstraints []= $elem919;
++              $elem933 = null;
++              $elem933 = new \metastore\SQLCheckConstraint();
++              $xfer += $elem933->read($input);
++              $this->checkConstraints []= $elem933;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -19056,9 -18724,9 +19056,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
          {
-           foreach ($this->primaryKeys as $iter942)
 -          foreach ($this->primaryKeys as $iter920)
++          foreach ($this->primaryKeys as $iter934)
            {
-             $xfer += $iter942->write($output);
 -            $xfer += $iter920->write($output);
++            $xfer += $iter934->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19073,9 -18741,9 +19073,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
          {
-           foreach ($this->foreignKeys as $iter943)
 -          foreach ($this->foreignKeys as $iter921)
++          foreach ($this->foreignKeys as $iter935)
            {
-             $xfer += $iter943->write($output);
 -            $xfer += $iter921->write($output);
++            $xfer += $iter935->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19090,9 -18758,9 +19090,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints));
          {
-           foreach ($this->uniqueConstraints as $iter944)
 -          foreach ($this->uniqueConstraints as $iter922)
++          foreach ($this->uniqueConstraints as $iter936)
            {
-             $xfer += $iter944->write($output);
 -            $xfer += $iter922->write($output);
++            $xfer += $iter936->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19107,9 -18775,9 +19107,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints));
          {
-           foreach ($this->notNullConstraints as $iter945)
 -          foreach ($this->notNullConstraints as $iter923)
++          foreach ($this->notNullConstraints as $iter937)
            {
-             $xfer += $iter945->write($output);
 -            $xfer += $iter923->write($output);
++            $xfer += $iter937->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19124,9 -18792,9 +19124,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints));
          {
-           foreach ($this->defaultConstraints as $iter946)
 -          foreach ($this->defaultConstraints as $iter924)
++          foreach ($this->defaultConstraints as $iter938)
            {
-             $xfer += $iter946->write($output);
 -            $xfer += $iter924->write($output);
++            $xfer += $iter938->write($output);
            }
          }
          $output->writeListEnd();
@@@ -19141,9 -18809,9 +19141,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->checkConstraints));
          {
-           foreach ($this->checkConstraints as $iter947)
 -          foreach ($this->checkConstraints as $iter925)
++          foreach ($this->checkConstraints as $iter939)
            {
-             $xfer += $iter947->write($output);
 -            $xfer += $iter925->write($output);
++            $xfer += $iter939->write($output);
            }
          }
          $output->writeListEnd();
@@@ -21143,14 -20811,14 +21143,14 @@@ class ThriftHiveMetastore_truncate_tabl
          case 3:
            if ($ftype == TType::LST) {
              $this->partNames = array();
-             $_size948 = 0;
-             $_etype951 = 0;
-             $xfer += $input->readListBegin($_etype951, $_size948);
-             for ($_i952 = 0; $_i952 < $_size948; ++$_i952)
 -            $_size926 = 0;
 -            $_etype929 = 0;
 -            $xfer += $input->readListBegin($_etype929, $_size926);
 -            for ($_i930 = 0; $_i930 < $_size926; ++$_i930)
++            $_size940 = 0;
++            $_etype943 = 0;
++            $xfer += $input->readListBegin($_etype943, $_size940);
++            for ($_i944 = 0; $_i944 < $_size940; ++$_i944)
              {
-               $elem953 = null;
-               $xfer += $input->readString($elem953);
-               $this->partNames []= $elem953;
 -              $elem931 = null;
 -              $xfer += $input->readString($elem931);
 -              $this->partNames []= $elem931;
++              $elem945 = null;
++              $xfer += $input->readString($elem945);
++              $this->partNames []= $elem945;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -21188,9 -20856,9 +21188,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->partNames));
          {
-           foreach ($this->partNames as $iter954)
 -          foreach ($this->partNames as $iter932)
++          foreach ($this->partNames as $iter946)
            {
-             $xfer += $output->writeString($iter954);
 -            $xfer += $output->writeString($iter932);
++            $xfer += $output->writeString($iter946);
            }
          }
          $output->writeListEnd();
@@@ -21626,14 -21109,14 +21626,14 @@@ class ThriftHiveMetastore_get_tables_re
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size955 = 0;
-             $_etype958 = 0;
-             $xfer += $input->readListBegin($_etype958, $_size955);
-             for ($_i959 = 0; $_i959 < $_size955; ++$_i959)
 -            $_size933 = 0;
 -            $_etype936 = 0;
 -            $xfer += $input->readListBegin($_etype936, $_size933);
 -            for ($_i937 = 0; $_i937 < $_size933; ++$_i937)
++            $_size947 = 0;
++            $_etype950 = 0;
++            $xfer += $input->readListBegin($_etype950, $_size947);
++            for ($_i951 = 0; $_i951 < $_size947; ++$_i951)
              {
-               $elem960 = null;
-               $xfer += $input->readString($elem960);
-               $this->success []= $elem960;
 -              $elem938 = null;
 -              $xfer += $input->readString($elem938);
 -              $this->success []= $elem938;
++              $elem952 = null;
++              $xfer += $input->readString($elem952);
++              $this->success []= $elem952;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -21669,9 -21152,9 +21669,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter961)
 -          foreach ($this->success as $iter939)
++          foreach ($this->success as $iter953)
            {
-             $xfer += $output->writeString($iter961);
 -            $xfer += $output->writeString($iter939);
++            $xfer += $output->writeString($iter953);
            }
          }
          $output->writeListEnd();
@@@ -21873,14 -21356,14 +21873,14 @@@ class ThriftHiveMetastore_get_tables_by
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size962 = 0;
-             $_etype965 = 0;
-             $xfer += $input->readListBegin($_etype965, $_size962);
-             for ($_i966 = 0; $_i966 < $_size962; ++$_i966)
 -            $_size940 = 0;
 -            $_etype943 = 0;
 -            $xfer += $input->readListBegin($_etype943, $_size940);
 -            for ($_i944 = 0; $_i944 < $_size940; ++$_i944)
++            $_size954 = 0;
++            $_etype957 = 0;
++            $xfer += $input->readListBegin($_etype957, $_size954);
++            for ($_i958 = 0; $_i958 < $_size954; ++$_i958)
              {
-               $elem967 = null;
-               $xfer += $input->readString($elem967);
-               $this->success []= $elem967;
 -              $elem945 = null;
 -              $xfer += $input->readString($elem945);
 -              $this->success []= $elem945;
++              $elem959 = null;
++              $xfer += $input->readString($elem959);
++              $this->success []= $elem959;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -21916,9 -21399,9 +21916,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter968)
 -          foreach ($this->success as $iter946)
++          foreach ($this->success as $iter960)
            {
-             $xfer += $output->writeString($iter968);
 -            $xfer += $output->writeString($iter946);
++            $xfer += $output->writeString($iter960);
            }
          }
          $output->writeListEnd();
@@@ -22074,14 -21557,14 +22074,14 @@@ class ThriftHiveMetastore_get_materiali
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size969 = 0;
-             $_etype972 = 0;
-             $xfer += $input->readListBegin($_etype972, $_size969);
-             for ($_i973 = 0; $_i973 < $_size969; ++$_i973)
 -            $_size947 = 0;
 -            $_etype950 = 0;
 -            $xfer += $input->readListBegin($_etype950, $_size947);
 -            for ($_i951 = 0; $_i951 < $_size947; ++$_i951)
++            $_size961 = 0;
++            $_etype964 = 0;
++            $xfer += $input->readListBegin($_etype964, $_size961);
++            for ($_i965 = 0; $_i965 < $_size961; ++$_i965)
              {
-               $elem974 = null;
-               $xfer += $input->readString($elem974);
-               $this->success []= $elem974;
 -              $elem952 = null;
 -              $xfer += $input->readString($elem952);
 -              $this->success []= $elem952;
++              $elem966 = null;
++              $xfer += $input->readString($elem966);
++              $this->success []= $elem966;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22117,9 -21600,9 +22117,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter975)
 -          foreach ($this->success as $iter953)
++          foreach ($this->success as $iter967)
            {
-             $xfer += $output->writeString($iter975);
 -            $xfer += $output->writeString($iter953);
++            $xfer += $output->writeString($iter967);
            }
          }
          $output->writeListEnd();
@@@ -22224,14 -21707,14 +22224,14 @@@ class ThriftHiveMetastore_get_table_met
          case 3:
            if ($ftype == TType::LST) {
              $this->tbl_types = array();
-             $_size976 = 0;
-             $_etype979 = 0;
-             $xfer += $input->readListBegin($_etype979, $_size976);
-             for ($_i980 = 0; $_i980 < $_size976; ++$_i980)
 -            $_size954 = 0;
 -            $_etype957 = 0;
 -            $xfer += $input->readListBegin($_etype957, $_size954);
 -            for ($_i958 = 0; $_i958 < $_size954; ++$_i958)
++            $_size968 = 0;
++            $_etype971 = 0;
++            $xfer += $input->readListBegin($_etype971, $_size968);
++            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
              {
-               $elem981 = null;
-               $xfer += $input->readString($elem981);
-               $this->tbl_types []= $elem981;
 -              $elem959 = null;
 -              $xfer += $input->readString($elem959);
 -              $this->tbl_types []= $elem959;
++              $elem973 = null;
++              $xfer += $input->readString($elem973);
++              $this->tbl_types []= $elem973;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22269,9 -21752,9 +22269,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->tbl_types));
          {
-           foreach ($this->tbl_types as $iter982)
 -          foreach ($this->tbl_types as $iter960)
++          foreach ($this->tbl_types as $iter974)
            {
-             $xfer += $output->writeString($iter982);
 -            $xfer += $output->writeString($iter960);
++            $xfer += $output->writeString($iter974);
            }
          }
          $output->writeListEnd();
@@@ -22348,15 -21831,15 +22348,15 @@@ class ThriftHiveMetastore_get_table_met
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size983 = 0;
-             $_etype986 = 0;
-             $xfer += $input->readListBegin($_etype986, $_size983);
-             for ($_i987 = 0; $_i987 < $_size983; ++$_i987)
 -            $_size961 = 0;
 -            $_etype964 = 0;
 -            $xfer += $input->readListBegin($_etype964, $_size961);
 -            for ($_i965 = 0; $_i965 < $_size961; ++$_i965)
++            $_size975 = 0;
++            $_etype978 = 0;
++            $xfer += $input->readListBegin($_etype978, $_size975);
++            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
              {
-               $elem988 = null;
-               $elem988 = new \metastore\TableMeta();
-               $xfer += $elem988->read($input);
-               $this->success []= $elem988;
 -              $elem966 = null;
 -              $elem966 = new \metastore\TableMeta();
 -              $xfer += $elem966->read($input);
 -              $this->success []= $elem966;
++              $elem980 = null;
++              $elem980 = new \metastore\TableMeta();
++              $xfer += $elem980->read($input);
++              $this->success []= $elem980;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22392,9 -21875,9 +22392,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter989)
 -          foreach ($this->success as $iter967)
++          foreach ($this->success as $iter981)
            {
-             $xfer += $iter989->write($output);
 -            $xfer += $iter967->write($output);
++            $xfer += $iter981->write($output);
            }
          }
          $output->writeListEnd();
@@@ -22550,14 -22033,14 +22550,14 @@@ class ThriftHiveMetastore_get_all_table
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size990 = 0;
-             $_etype993 = 0;
-             $xfer += $input->readListBegin($_etype993, $_size990);
-             for ($_i994 = 0; $_i994 < $_size990; ++$_i994)
 -            $_size968 = 0;
 -            $_etype971 = 0;
 -            $xfer += $input->readListBegin($_etype971, $_size968);
 -            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
++            $_size982 = 0;
++            $_etype985 = 0;
++            $xfer += $input->readListBegin($_etype985, $_size982);
++            for ($_i986 = 0; $_i986 < $_size982; ++$_i986)
              {
-               $elem995 = null;
-               $xfer += $input->readString($elem995);
-               $this->success []= $elem995;
 -              $elem973 = null;
 -              $xfer += $input->readString($elem973);
 -              $this->success []= $elem973;
++              $elem987 = null;
++              $xfer += $input->readString($elem987);
++              $this->success []= $elem987;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22593,9 -22076,9 +22593,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter996)
 -          foreach ($this->success as $iter974)
++          foreach ($this->success as $iter988)
            {
-             $xfer += $output->writeString($iter996);
 -            $xfer += $output->writeString($iter974);
++            $xfer += $output->writeString($iter988);
            }
          }
          $output->writeListEnd();
@@@ -22910,14 -22393,14 +22910,14 @@@ class ThriftHiveMetastore_get_table_obj
          case 2:
            if ($ftype == TType::LST) {
              $this->tbl_names = array();
-             $_size997 = 0;
-             $_etype1000 = 0;
-             $xfer += $input->readListBegin($_etype1000, $_size997);
-             for ($_i1001 = 0; $_i1001 < $_size997; ++$_i1001)
 -            $_size975 = 0;
 -            $_etype978 = 0;
 -            $xfer += $input->readListBegin($_etype978, $_size975);
 -            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
++            $_size989 = 0;
++            $_etype992 = 0;
++            $xfer += $input->readListBegin($_etype992, $_size989);
++            for ($_i993 = 0; $_i993 < $_size989; ++$_i993)
              {
-               $elem1002 = null;
-               $xfer += $input->readString($elem1002);
-               $this->tbl_names []= $elem1002;
 -              $elem980 = null;
 -              $xfer += $input->readString($elem980);
 -              $this->tbl_names []= $elem980;
++              $elem994 = null;
++              $xfer += $input->readString($elem994);
++              $this->tbl_names []= $elem994;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -22950,9 -22433,9 +22950,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->tbl_names));
          {
-           foreach ($this->tbl_names as $iter1003)
 -          foreach ($this->tbl_names as $iter981)
++          foreach ($this->tbl_names as $iter995)
            {
-             $xfer += $output->writeString($iter1003);
 -            $xfer += $output->writeString($iter981);
++            $xfer += $output->writeString($iter995);
            }
          }
          $output->writeListEnd();
@@@ -23017,15 -22500,15 +23017,15 @@@ class ThriftHiveMetastore_get_table_obj
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1004 = 0;
-             $_etype1007 = 0;
-             $xfer += $input->readListBegin($_etype1007, $_size1004);
-             for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008)
 -            $_size982 = 0;
 -            $_etype985 = 0;
 -            $xfer += $input->readListBegin($_etype985, $_size982);
 -            for ($_i986 = 0; $_i986 < $_size982; ++$_i986)
++            $_size996 = 0;
++            $_etype999 = 0;
++            $xfer += $input->readListBegin($_etype999, $_size996);
++            for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000)
              {
-               $elem1009 = null;
-               $elem1009 = new \metastore\Table();
-               $xfer += $elem1009->read($input);
-               $this->success []= $elem1009;
 -              $elem987 = null;
 -              $elem987 = new \metastore\Table();
 -              $xfer += $elem987->read($input);
 -              $this->success []= $elem987;
++              $elem1001 = null;
++              $elem1001 = new \metastore\Table();
++              $xfer += $elem1001->read($input);
++              $this->success []= $elem1001;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -23053,9 -22536,9 +23053,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1010)
 -          foreach ($this->success as $iter988)
++          foreach ($this->success as $iter1002)
            {
-             $xfer += $iter1010->write($output);
 -            $xfer += $iter988->write($output);
++            $xfer += $iter1002->write($output);
            }
          }
          $output->writeListEnd();
@@@ -24307,14 -23738,14 +24255,14 @@@ class ThriftHiveMetastore_get_table_nam
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1027 = 0;
-             $_etype1030 = 0;
-             $xfer += $input->readListBegin($_etype1030, $_size1027);
-             for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031)
 -            $_size989 = 0;
 -            $_etype992 = 0;
 -            $xfer += $input->readListBegin($_etype992, $_size989);
 -            for ($_i993 = 0; $_i993 < $_size989; ++$_i993)
++            $_size1003 = 0;
++            $_etype1006 = 0;
++            $xfer += $input->readListBegin($_etype1006, $_size1003);
++            for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007)
              {
-               $elem1032 = null;
-               $xfer += $input->readString($elem1032);
-               $this->success []= $elem1032;
 -              $elem994 = null;
 -              $xfer += $input->readString($elem994);
 -              $this->success []= $elem994;
++              $elem1008 = null;
++              $xfer += $input->readString($elem1008);
++              $this->success []= $elem1008;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -24366,9 -23797,9 +24314,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter1033)
 -          foreach ($this->success as $iter995)
++          foreach ($this->success as $iter1009)
            {
-             $xfer += $output->writeString($iter1033);
 -            $xfer += $output->writeString($iter995);
++            $xfer += $output->writeString($iter1009);
            }
          }
          $output->writeListEnd();
@@@ -25891,15 -25365,15 +25839,15 @@@ class ThriftHiveMetastore_add_partition
          case 1:
            if ($ftype == TType::LST) {
              $this->new_parts = array();
-             $_size1034 = 0;
-             $_etype1037 = 0;
-             $xfer += $input->readListBegin($_etype1037, $_size1034);
-             for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038)
 -            $_size1003 = 0;
 -            $_etype1006 = 0;
 -            $xfer += $input->readListBegin($_etype1006, $_size1003);
 -            for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007)
++            $_size1010 = 0;
++            $_etype1013 = 0;
++            $xfer += $input->readListBegin($_etype1013, $_size1010);
++            for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014)
              {
-               $elem1039 = null;
-               $elem1039 = new \metastore\Partition();
-               $xfer += $elem1039->read($input);
-               $this->new_parts []= $elem1039;
 -              $elem1008 = null;
 -              $elem1008 = new \metastore\PartitionSpec();
 -              $xfer += $elem1008->read($input);
 -              $this->new_parts []= $elem1008;
++              $elem1015 = null;
++              $elem1015 = new \metastore\Partition();
++              $xfer += $elem1015->read($input);
++              $this->new_parts []= $elem1015;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -25927,9 -25401,9 +25875,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->new_parts));
          {
-           foreach ($this->new_parts as $iter1040)
 -          foreach ($this->new_parts as $iter1009)
++          foreach ($this->new_parts as $iter1016)
            {
-             $xfer += $iter1040->write($output);
 -            $xfer += $iter1009->write($output);
++            $xfer += $iter1016->write($output);
            }
          }
          $output->writeListEnd();
@@@ -26142,17 -25637,30 +26090,17 @@@ class ThriftHiveMetastore_add_partition
        switch ($fid)
        {
          case 1:
 -          if ($ftype == TType::STRING) {
 -            $xfer += $input->readString($this->db_name);
 -          } else {
 -            $xfer += $input->skip($ftype);
 -          }
 -          break;
 -        case 2:
 -          if ($ftype == TType::STRING) {
 -            $xfer += $input->readString($this->tbl_name);
 -          } else {
 -            $xfer += $input->skip($ftype);
 -          }
 -          break;
 -        case 3:
            if ($ftype == TType::LST) {
 -            $this->part_vals = array();
 -            $_size1010 = 0;
 -            $_etype1013 = 0;
 -            $xfer += $input->readListBegin($_etype1013, $_size1010);
 -            for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014)
 +            $this->new_parts = array();
-             $_size1041 = 0;
-             $_etype1044 = 0;
-             $xfer += $input->readListBegin($_etype1044, $_size1041);
-             for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045)
++            $_size1017 = 0;
++            $_etype1020 = 0;
++            $xfer += $input->readListBegin($_etype1020, $_size1017);
++            for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021)
              {
-               $elem1046 = null;
-               $elem1046 = new \metastore\PartitionSpec();
-               $xfer += $elem1046->read($input);
-               $this->new_parts []= $elem1046;
 -              $elem1015 = null;
 -              $xfer += $input->readString($elem1015);
 -              $this->part_vals []= $elem1015;
++              $elem1022 = null;
++              $elem1022 = new \metastore\PartitionSpec();
++              $xfer += $elem1022->read($input);
++              $this->new_parts []= $elem1022;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -26171,18 -25679,28 +26119,18 @@@
  
    public function write($output) {
      $xfer = 0;
 -    $xfer += $output->writeStructBegin('ThriftHiveMetastore_append_partition_args');
 -    if ($this->db_name !== null) {
 -      $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
 -      $xfer += $output->writeString($this->db_name);
 -      $xfer += $output->writeFieldEnd();
 -    }
 -    if ($this->tbl_name !== null) {
 -      $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
 -      $xfer += $output->writeString($this->tbl_name);
 -      $xfer += $output->writeFieldEnd();
 -    }
 -    if ($this->part_vals !== null) {
 -      if (!is_array($this->part_vals)) {
 +    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_pspec_args');
 +    if ($this->new_parts !== null) {
 +      if (!is_array($this->new_parts)) {
          throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
        }
 -      $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3);
 +      $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1);
        {
 -        $output->writeListBegin(TType::STRING, count($this->part_vals));
 +        $output->writeListBegin(TType::STRUCT, count($this->new_parts));
          {
-           foreach ($this->new_parts as $iter1047)
 -          foreach ($this->part_vals as $iter1016)
++          foreach ($this->new_parts as $iter1023)
            {
-             $xfer += $iter1047->write($output);
 -            $xfer += $output->writeString($iter1016);
++            $xfer += $iter1023->write($output);
            }
          }
          $output->writeListEnd();
@@@ -26416,32 -25914,9 +26364,32 @@@ class ThriftHiveMetastore_append_partit
        switch ($fid)
        {
          case 1:
 -          if ($ftype == TType::STRUCT) {
 -            $this->request = new \metastore\AddPartitionsRequest();
 -            $xfer += $this->request->read($input);
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->db_name);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->tbl_name);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::LST) {
 +            $this->part_vals = array();
-             $_size1048 = 0;
-             $_etype1051 = 0;
-             $xfer += $input->readListBegin($_etype1051, $_size1048);
-             for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052)
++            $_size1024 = 0;
++            $_etype1027 = 0;
++            $xfer += $input->readListBegin($_etype1027, $_size1024);
++            for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028)
 +            {
-               $elem1053 = null;
-               $xfer += $input->readString($elem1053);
-               $this->part_vals []= $elem1053;
++              $elem1029 = null;
++              $xfer += $input->readString($elem1029);
++              $this->part_vals []= $elem1029;
 +            }
 +            $xfer += $input->readListEnd();
            } else {
              $xfer += $input->skip($ftype);
            }
@@@ -26458,32 -25933,13 +26406,32 @@@
  
    public function write($output) {
      $xfer = 0;
 -    $xfer += $output->writeStructBegin('ThriftHiveMetastore_add_partitions_req_args');
 -    if ($this->request !== null) {
 -      if (!is_object($this->request)) {
 +    $xfer += $output->writeStructBegin('ThriftHiveMetastore_append_partition_args');
 +    if ($this->db_name !== null) {
 +      $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1);
 +      $xfer += $output->writeString($this->db_name);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tbl_name !== null) {
 +      $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2);
 +      $xfer += $output->writeString($this->tbl_name);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->part_vals !== null) {
 +      if (!is_array($this->part_vals)) {
          throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
        }
 -      $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1);
 -      $xfer += $this->request->write($output);
 +      $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3);
 +      {
 +        $output->writeListBegin(TType::STRING, count($this->part_vals));
 +        {
-           foreach ($this->part_vals as $iter1054)
++          foreach ($this->part_vals as $iter1030)
 +          {
-             $xfer += $output->writeString($iter1054);
++            $xfer += $output->writeString($iter1030);
 +          }
 +        }
 +        $output->writeListEnd();
 +      }
        $xfer += $output->writeFieldEnd();
      }
      $xfer += $output->writeFieldStop();
@@@ -26981,14 -26202,14 +26929,14 @@@ class ThriftHiveMetastore_append_partit
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1055 = 0;
-             $_etype1058 = 0;
-             $xfer += $input->readListBegin($_etype1058, $_size1055);
-             for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059)
 -            $_size1017 = 0;
 -            $_etype1020 = 0;
 -            $xfer += $input->readListBegin($_etype1020, $_size1017);
 -            for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021)
++            $_size1031 = 0;
++            $_etype1034 = 0;
++            $xfer += $input->readListBegin($_etype1034, $_size1031);
++            for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035)
              {
-               $elem1060 = null;
-               $xfer += $input->readString($elem1060);
-               $this->part_vals []= $elem1060;
 -              $elem1022 = null;
 -              $xfer += $input->readString($elem1022);
 -              $this->part_vals []= $elem1022;
++              $elem1036 = null;
++              $xfer += $input->readString($elem1036);
++              $this->part_vals []= $elem1036;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27034,9 -26255,9 +26982,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1061)
 -          foreach ($this->part_vals as $iter1023)
++          foreach ($this->part_vals as $iter1037)
            {
-             $xfer += $output->writeString($iter1061);
 -            $xfer += $output->writeString($iter1023);
++            $xfer += $output->writeString($iter1037);
            }
          }
          $output->writeListEnd();
@@@ -27890,14 -27111,14 +27838,14 @@@ class ThriftHiveMetastore_drop_partitio
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1062 = 0;
-             $_etype1065 = 0;
-             $xfer += $input->readListBegin($_etype1065, $_size1062);
-             for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066)
 -            $_size1024 = 0;
 -            $_etype1027 = 0;
 -            $xfer += $input->readListBegin($_etype1027, $_size1024);
 -            for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028)
++            $_size1038 = 0;
++            $_etype1041 = 0;
++            $xfer += $input->readListBegin($_etype1041, $_size1038);
++            for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042)
              {
-               $elem1067 = null;
-               $xfer += $input->readString($elem1067);
-               $this->part_vals []= $elem1067;
 -              $elem1029 = null;
 -              $xfer += $input->readString($elem1029);
 -              $this->part_vals []= $elem1029;
++              $elem1043 = null;
++              $xfer += $input->readString($elem1043);
++              $this->part_vals []= $elem1043;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27942,9 -27163,9 +27890,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1068)
 -          foreach ($this->part_vals as $iter1030)
++          foreach ($this->part_vals as $iter1044)
            {
-             $xfer += $output->writeString($iter1068);
 -            $xfer += $output->writeString($iter1030);
++            $xfer += $output->writeString($iter1044);
            }
          }
          $output->writeListEnd();
@@@ -28197,14 -27418,14 +28145,14 @@@ class ThriftHiveMetastore_drop_partitio
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1069 = 0;
-             $_etype1072 = 0;
-             $xfer += $input->readListBegin($_etype1072, $_size1069);
-             for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073)
 -            $_size1031 = 0;
 -            $_etype1034 = 0;
 -            $xfer += $input->readListBegin($_etype1034, $_size1031);
 -            for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035)
++            $_size1045 = 0;
++            $_etype1048 = 0;
++            $xfer += $input->readListBegin($_etype1048, $_size1045);
++            for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049)
              {
-               $elem1074 = null;
-               $xfer += $input->readString($elem1074);
-               $this->part_vals []= $elem1074;
 -              $elem1036 = null;
 -              $xfer += $input->readString($elem1036);
 -              $this->part_vals []= $elem1036;
++              $elem1050 = null;
++              $xfer += $input->readString($elem1050);
++              $this->part_vals []= $elem1050;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -28257,9 -27478,9 +28205,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1075)
 -          foreach ($this->part_vals as $iter1037)
++          foreach ($this->part_vals as $iter1051)
            {
-             $xfer += $output->writeString($iter1075);
 -            $xfer += $output->writeString($iter1037);
++            $xfer += $output->writeString($iter1051);
            }
          }
          $output->writeListEnd();
@@@ -29273,14 -28494,14 +29221,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1076 = 0;
-             $_etype1079 = 0;
-             $xfer += $input->readListBegin($_etype1079, $_size1076);
-             for ($_i1080 = 0; $_i1080 < $_size1076; ++$_i1080)
 -            $_size1038 = 0;
 -            $_etype1041 = 0;
 -            $xfer += $input->readListBegin($_etype1041, $_size1038);
 -            for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042)
++            $_size1052 = 0;
++            $_etype1055 = 0;
++            $xfer += $input->readListBegin($_etype1055, $_size1052);
++            for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056)
              {
-               $elem1081 = null;
-               $xfer += $input->readString($elem1081);
-               $this->part_vals []= $elem1081;
 -              $elem1043 = null;
 -              $xfer += $input->readString($elem1043);
 -              $this->part_vals []= $elem1043;
++              $elem1057 = null;
++              $xfer += $input->readString($elem1057);
++              $this->part_vals []= $elem1057;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -29318,9 -28539,9 +29266,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1082)
 -          foreach ($this->part_vals as $iter1044)
++          foreach ($this->part_vals as $iter1058)
            {
-             $xfer += $output->writeString($iter1082);
 -            $xfer += $output->writeString($iter1044);
++            $xfer += $output->writeString($iter1058);
            }
          }
          $output->writeListEnd();
@@@ -29562,17 -28783,17 +29510,17 @@@ class ThriftHiveMetastore_exchange_part
          case 1:
            if ($ftype == TType::MAP) {
              $this->partitionSpecs = array();
-             $_size1083 = 0;
-             $_ktype1084 = 0;
-             $_vtype1085 = 0;
-             $xfer += $input->readMapBegin($_ktype1084, $_vtype1085, $_size1083);
-             for ($_i1087 = 0; $_i1087 < $_size1083; ++$_i1087)
 -            $_size1045 = 0;
 -            $_ktype1046 = 0;
 -            $_vtype1047 = 0;
 -            $xfer += $input->readMapBegin($_ktype1046, $_vtype1047, $_size1045);
 -            for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049)
++            $_size1059 = 0;
++            $_ktype1060 = 0;
++            $_vtype1061 = 0;
++            $xfer += $input->readMapBegin($_ktype1060, $_vtype1061, $_size1059);
++            for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063)
              {
-               $key1088 = '';
-               $val1089 = '';
-               $xfer += $input->readString($key1088);
-               $xfer += $input->readString($val1089);
-               $this->partitionSpecs[$key1088] = $val1089;
 -              $key1050 = '';
 -              $val1051 = '';
 -              $xfer += $input->readString($key1050);
 -              $xfer += $input->readString($val1051);
 -              $this->partitionSpecs[$key1050] = $val1051;
++              $key1064 = '';
++              $val1065 = '';
++              $xfer += $input->readString($key1064);
++              $xfer += $input->readString($val1065);
++              $this->partitionSpecs[$key1064] = $val1065;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -29628,10 -28849,10 +29576,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
          {
-           foreach ($this->partitionSpecs as $kiter1090 => $viter1091)
 -          foreach ($this->partitionSpecs as $kiter1052 => $viter1053)
++          foreach ($this->partitionSpecs as $kiter1066 => $viter1067)
            {
-             $xfer += $output->writeString($kiter1090);
-             $xfer += $output->writeString($viter1091);
 -            $xfer += $output->writeString($kiter1052);
 -            $xfer += $output->writeString($viter1053);
++            $xfer += $output->writeString($kiter1066);
++            $xfer += $output->writeString($viter1067);
            }
          }
          $output->writeMapEnd();
@@@ -29943,17 -29164,17 +29891,17 @@@ class ThriftHiveMetastore_exchange_part
          case 1:
            if ($ftype == TType::MAP) {
              $this->partitionSpecs = array();
-             $_size1092 = 0;
-             $_ktype1093 = 0;
-             $_vtype1094 = 0;
-             $xfer += $input->readMapBegin($_ktype1093, $_vtype1094, $_size1092);
-             for ($_i1096 = 0; $_i1096 < $_size1092; ++$_i1096)
 -            $_size1054 = 0;
 -            $_ktype1055 = 0;
 -            $_vtype1056 = 0;
 -            $xfer += $input->readMapBegin($_ktype1055, $_vtype1056, $_size1054);
 -            for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058)
++            $_size1068 = 0;
++            $_ktype1069 = 0;
++            $_vtype1070 = 0;
++            $xfer += $input->readMapBegin($_ktype1069, $_vtype1070, $_size1068);
++            for ($_i1072 = 0; $_i1072 < $_size1068; ++$_i1072)
              {
-               $key1097 = '';
-               $val1098 = '';
-               $xfer += $input->readString($key1097);
-               $xfer += $input->readString($val1098);
-               $this->partitionSpecs[$key1097] = $val1098;
 -              $key1059 = '';
 -              $val1060 = '';
 -              $xfer += $input->readString($key1059);
 -              $xfer += $input->readString($val1060);
 -              $this->partitionSpecs[$key1059] = $val1060;
++              $key1073 = '';
++              $val1074 = '';
++              $xfer += $input->readString($key1073);
++              $xfer += $input->readString($val1074);
++              $this->partitionSpecs[$key1073] = $val1074;
              }
              $xfer += $input->readMapEnd();
            } else {
@@@ -30009,10 -29230,10 +29957,10 @@@
        {
          $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
          {
-           foreach ($this->partitionSpecs as $kiter1099 => $viter1100)
 -          foreach ($this->partitionSpecs as $kiter1061 => $viter1062)
++          foreach ($this->partitionSpecs as $kiter1075 => $viter1076)
            {
-             $xfer += $output->writeString($kiter1099);
-             $xfer += $output->writeString($viter1100);
 -            $xfer += $output->writeString($kiter1061);
 -            $xfer += $output->writeString($viter1062);
++            $xfer += $output->writeString($kiter1075);
++            $xfer += $output->writeString($viter1076);
            }
          }
          $output->writeMapEnd();
@@@ -30145,15 -29366,15 +30093,15 @@@ class ThriftHiveMetastore_exchange_part
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1101 = 0;
-             $_etype1104 = 0;
-             $xfer += $input->readListBegin($_etype1104, $_size1101);
-             for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105)
 -            $_size1063 = 0;
 -            $_etype1066 = 0;
 -            $xfer += $input->readListBegin($_etype1066, $_size1063);
 -            for ($_i1067 = 0; $_i1067 < $_size1063; ++$_i1067)
++            $_size1077 = 0;
++            $_etype1080 = 0;
++            $xfer += $input->readListBegin($_etype1080, $_size1077);
++            for ($_i1081 = 0; $_i1081 < $_size1077; ++$_i1081)
              {
-               $elem1106 = null;
-               $elem1106 = new \metastore\Partition();
-               $xfer += $elem1106->read($input);
-               $this->success []= $elem1106;
 -              $elem1068 = null;
 -              $elem1068 = new \metastore\Partition();
 -              $xfer += $elem1068->read($input);
 -              $this->success []= $elem1068;
++              $elem1082 = null;
++              $elem1082 = new \metastore\Partition();
++              $xfer += $elem1082->read($input);
++              $this->success []= $elem1082;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -30213,9 -29434,9 +30161,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1107)
 -          foreach ($this->success as $iter1069)
++          foreach ($this->success as $iter1083)
            {
-             $xfer += $iter1107->write($output);
 -            $xfer += $iter1069->write($output);
++            $xfer += $iter1083->write($output);
            }
          }
          $output->writeListEnd();
@@@ -30361,14 -29582,14 +30309,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1108 = 0;
-             $_etype1111 = 0;
-             $xfer += $input->readListBegin($_etype1111, $_size1108);
-             for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
 -            $_size1070 = 0;
 -            $_etype1073 = 0;
 -            $xfer += $input->readListBegin($_etype1073, $_size1070);
 -            for ($_i1074 = 0; $_i1074 < $_size1070; ++$_i1074)
++            $_size1084 = 0;
++            $_etype1087 = 0;
++            $xfer += $input->readListBegin($_etype1087, $_size1084);
++            for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088)
              {
-               $elem1113 = null;
-               $xfer += $input->readString($elem1113);
-               $this->part_vals []= $elem1113;
 -              $elem1075 = null;
 -              $xfer += $input->readString($elem1075);
 -              $this->part_vals []= $elem1075;
++              $elem1089 = null;
++              $xfer += $input->readString($elem1089);
++              $this->part_vals []= $elem1089;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -30385,14 -29606,14 +30333,14 @@@
          case 5:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size1114 = 0;
-             $_etype1117 = 0;
-             $xfer += $input->readListBegin($_etype1117, $_size1114);
-             for ($_i1118 = 0; $_i1118 < $_size1114; ++$_i1118)
 -            $_size1076 = 0;
 -            $_etype1079 = 0;
 -            $xfer += $input->readListBegin($_etype1079, $_size1076);
 -            for ($_i1080 = 0; $_i1080 < $_size1076; ++$_i1080)
++            $_size1090 = 0;
++            $_etype1093 = 0;
++            $xfer += $input->readListBegin($_etype1093, $_size1090);
++            for ($_i1094 = 0; $_i1094 < $_size1090; ++$_i1094)
              {
-               $elem1119 = null;
-               $xfer += $input->readString($elem1119);
-               $this->group_names []= $elem1119;
 -              $elem1081 = null;
 -              $xfer += $input->readString($elem1081);
 -              $this->group_names []= $elem1081;
++              $elem1095 = null;
++              $xfer += $input->readString($elem1095);
++              $this->group_names []= $elem1095;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -30430,9 -29651,9 +30378,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1120)
 -          foreach ($this->part_vals as $iter1082)
++          foreach ($this->part_vals as $iter1096)
            {
-             $xfer += $output->writeString($iter1120);
 -            $xfer += $output->writeString($iter1082);
++            $xfer += $output->writeString($iter1096);
            }
          }
          $output->writeListEnd();
@@@ -30452,9 -29673,9 +30400,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter1121)
 -          foreach ($this->group_names as $iter1083)
++          foreach ($this->group_names as $iter1097)
            {
-             $xfer += $output->writeString($iter1121);
 -            $xfer += $output->writeString($iter1083);
++            $xfer += $output->writeString($iter1097);
            }
          }
          $output->writeListEnd();
@@@ -31045,15 -30266,15 +30993,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1122 = 0;
-             $_etype1125 = 0;
-             $xfer += $input->readListBegin($_etype1125, $_size1122);
-             for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126)
 -            $_size1084 = 0;
 -            $_etype1087 = 0;
 -            $xfer += $input->readListBegin($_etype1087, $_size1084);
 -            for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088)
++            $_size1098 = 0;
++            $_etype1101 = 0;
++            $xfer += $input->readListBegin($_etype1101, $_size1098);
++            for ($_i1102 = 0; $_i1102 < $_size1098; ++$_i1102)
              {
-               $elem1127 = null;
-               $elem1127 = new \metastore\Partition();
-               $xfer += $elem1127->read($input);
-               $this->success []= $elem1127;
 -              $elem1089 = null;
 -              $elem1089 = new \metastore\Partition();
 -              $xfer += $elem1089->read($input);
 -              $this->success []= $elem1089;
++              $elem1103 = null;
++              $elem1103 = new \metastore\Partition();
++              $xfer += $elem1103->read($input);
++              $this->success []= $elem1103;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31097,9 -30318,9 +31045,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1128)
 -          foreach ($this->success as $iter1090)
++          foreach ($this->success as $iter1104)
            {
-             $xfer += $iter1128->write($output);
 -            $xfer += $iter1090->write($output);
++            $xfer += $iter1104->write($output);
            }
          }
          $output->writeListEnd();
@@@ -31245,14 -30466,14 +31193,14 @@@ class ThriftHiveMetastore_get_partition
          case 5:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size1129 = 0;
-             $_etype1132 = 0;
-             $xfer += $input->readListBegin($_etype1132, $_size1129);
-             for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133)
 -            $_size1091 = 0;
 -            $_etype1094 = 0;
 -            $xfer += $input->readListBegin($_etype1094, $_size1091);
 -            for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095)
++            $_size1105 = 0;
++            $_etype1108 = 0;
++            $xfer += $input->readListBegin($_etype1108, $_size1105);
++            for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109)
              {
-               $elem1134 = null;
-               $xfer += $input->readString($elem1134);
-               $this->group_names []= $elem1134;
 -              $elem1096 = null;
 -              $xfer += $input->readString($elem1096);
 -              $this->group_names []= $elem1096;
++              $elem1110 = null;
++              $xfer += $input->readString($elem1110);
++              $this->group_names []= $elem1110;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31300,9 -30521,9 +31248,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter1135)
 -          foreach ($this->group_names as $iter1097)
++          foreach ($this->group_names as $iter1111)
            {
-             $xfer += $output->writeString($iter1135);
 -            $xfer += $output->writeString($iter1097);
++            $xfer += $output->writeString($iter1111);
            }
          }
          $output->writeListEnd();
@@@ -31391,15 -30612,15 +31339,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1136 = 0;
-             $_etype1139 = 0;
-             $xfer += $input->readListBegin($_etype1139, $_size1136);
-             for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140)
 -            $_size1098 = 0;
 -            $_etype1101 = 0;
 -            $xfer += $input->readListBegin($_etype1101, $_size1098);
 -            for ($_i1102 = 0; $_i1102 < $_size1098; ++$_i1102)
++            $_size1112 = 0;
++            $_etype1115 = 0;
++            $xfer += $input->readListBegin($_etype1115, $_size1112);
++            for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116)
              {
-               $elem1141 = null;
-               $elem1141 = new \metastore\Partition();
-               $xfer += $elem1141->read($input);
-               $this->success []= $elem1141;
 -              $elem1103 = null;
 -              $elem1103 = new \metastore\Partition();
 -              $xfer += $elem1103->read($input);
 -              $this->success []= $elem1103;
++              $elem1117 = null;
++              $elem1117 = new \metastore\Partition();
++              $xfer += $elem1117->read($input);
++              $this->success []= $elem1117;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31443,9 -30664,9 +31391,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1142)
 -          foreach ($this->success as $iter1104)
++          foreach ($this->success as $iter1118)
            {
-             $xfer += $iter1142->write($output);
 -            $xfer += $iter1104->write($output);
++            $xfer += $iter1118->write($output);
            }
          }
          $output->writeListEnd();
@@@ -31665,15 -30886,15 +31613,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1143 = 0;
-             $_etype1146 = 0;
-             $xfer += $input->readListBegin($_etype1146, $_size1143);
-             for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147)
 -            $_size1105 = 0;
 -            $_etype1108 = 0;
 -            $xfer += $input->readListBegin($_etype1108, $_size1105);
 -            for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109)
++            $_size1119 = 0;
++            $_etype1122 = 0;
++            $xfer += $input->readListBegin($_etype1122, $_size1119);
++            for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123)
              {
-               $elem1148 = null;
-               $elem1148 = new \metastore\PartitionSpec();
-               $xfer += $elem1148->read($input);
-               $this->success []= $elem1148;
 -              $elem1110 = null;
 -              $elem1110 = new \metastore\PartitionSpec();
 -              $xfer += $elem1110->read($input);
 -              $this->success []= $elem1110;
++              $elem1124 = null;
++              $elem1124 = new \metastore\PartitionSpec();
++              $xfer += $elem1124->read($input);
++              $this->success []= $elem1124;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31717,9 -30938,9 +31665,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1149)
 -          foreach ($this->success as $iter1111)
++          foreach ($this->success as $iter1125)
            {
-             $xfer += $iter1149->write($output);
 -            $xfer += $iter1111->write($output);
++            $xfer += $iter1125->write($output);
            }
          }
          $output->writeListEnd();
@@@ -31938,14 -31159,14 +31886,14 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1150 = 0;
-             $_etype1153 = 0;
-             $xfer += $input->readListBegin($_etype1153, $_size1150);
-             for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154)
 -            $_size1112 = 0;
 -            $_etype1115 = 0;
 -            $xfer += $input->readListBegin($_etype1115, $_size1112);
 -            for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116)
++            $_size1126 = 0;
++            $_etype1129 = 0;
++            $xfer += $input->readListBegin($_etype1129, $_size1126);
++            for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130)
              {
-               $elem1155 = null;
-               $xfer += $input->readString($elem1155);
-               $this->success []= $elem1155;
 -              $elem1117 = null;
 -              $xfer += $input->readString($elem1117);
 -              $this->success []= $elem1117;
++              $elem1131 = null;
++              $xfer += $input->readString($elem1131);
++              $this->success []= $elem1131;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31989,9 -31210,9 +31937,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->success));
          {
-           foreach ($this->success as $iter1156)
 -          foreach ($this->success as $iter1118)
++          foreach ($this->success as $iter1132)
            {
-             $xfer += $output->writeString($iter1156);
 -            $xfer += $output->writeString($iter1118);
++            $xfer += $output->writeString($iter1132);
            }
          }
          $output->writeListEnd();
@@@ -32322,14 -31543,14 +32270,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1157 = 0;
-             $_etype1160 = 0;
-             $xfer += $input->readListBegin($_etype1160, $_size1157);
-             for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161)
 -            $_size1119 = 0;
 -            $_etype1122 = 0;
 -            $xfer += $input->readListBegin($_etype1122, $_size1119);
 -            for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123)
++            $_size1133 = 0;
++            $_etype1136 = 0;
++            $xfer += $input->readListBegin($_etype1136, $_size1133);
++            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
              {
-               $elem1162 = null;
-               $xfer += $input->readString($elem1162);
-               $this->part_vals []= $elem1162;
 -              $elem1124 = null;
 -              $xfer += $input->readString($elem1124);
 -              $this->part_vals []= $elem1124;
++              $elem1138 = null;
++              $xfer += $input->readString($elem1138);
++              $this->part_vals []= $elem1138;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -32374,9 -31595,9 +32322,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1163)
 -          foreach ($this->part_vals as $iter1125)
++          foreach ($this->part_vals as $iter1139)
            {
-             $xfer += $output->writeString($iter1163);
 -            $xfer += $output->writeString($iter1125);
++            $xfer += $output->writeString($iter1139);
            }
          }
          $output->writeListEnd();
@@@ -32470,15 -31691,15 +32418,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1164 = 0;
-             $_etype1167 = 0;
-             $xfer += $input->readListBegin($_etype1167, $_size1164);
-             for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168)
 -            $_size1126 = 0;
 -            $_etype1129 = 0;
 -            $xfer += $input->readListBegin($_etype1129, $_size1126);
 -            for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130)
++            $_size1140 = 0;
++            $_etype1143 = 0;
++            $xfer += $input->readListBegin($_etype1143, $_size1140);
++            for ($_i1144 = 0; $_i1144 < $_size1140; ++$_i1144)
              {
-               $elem1169 = null;
-               $elem1169 = new \metastore\Partition();
-               $xfer += $elem1169->read($input);
-               $this->success []= $elem1169;
 -              $elem1131 = null;
 -              $elem1131 = new \metastore\Partition();
 -              $xfer += $elem1131->read($input);
 -              $this->success []= $elem1131;
++              $elem1145 = null;
++              $elem1145 = new \metastore\Partition();
++              $xfer += $elem1145->read($input);
++              $this->success []= $elem1145;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -32522,9 -31743,9 +32470,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1170)
 -          foreach ($this->success as $iter1132)
++          foreach ($this->success as $iter1146)
            {
-             $xfer += $iter1170->write($output);
 -            $xfer += $iter1132->write($output);
++            $xfer += $iter1146->write($output);
            }
          }
          $output->writeListEnd();
@@@ -32671,14 -31892,14 +32619,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1171 = 0;
-             $_etype1174 = 0;
-             $xfer += $input->readListBegin($_etype1174, $_size1171);
-             for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
 -            $_size1133 = 0;
 -            $_etype1136 = 0;
 -            $xfer += $input->readListBegin($_etype1136, $_size1133);
 -            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
++            $_size1147 = 0;
++            $_etype1150 = 0;
++            $xfer += $input->readListBegin($_etype1150, $_size1147);
++            for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151)
              {
-               $elem1176 = null;
-               $xfer += $input->readString($elem1176);
-               $this->part_vals []= $elem1176;
 -              $elem1138 = null;
 -              $xfer += $input->readString($elem1138);
 -              $this->part_vals []= $elem1138;
++              $elem1152 = null;
++              $xfer += $input->readString($elem1152);
++              $this->part_vals []= $elem1152;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -32702,14 -31923,14 +32650,14 @@@
          case 6:
            if ($ftype == TType::LST) {
              $this->group_names = array();
-             $_size1177 = 0;
-             $_etype1180 = 0;
-             $xfer += $input->readListBegin($_etype1180, $_size1177);
-             for ($_i1181 = 0; $_i1181 < $_size1177; ++$_i1181)
 -            $_size1139 = 0;
 -            $_etype1142 = 0;
 -            $xfer += $input->readListBegin($_etype1142, $_size1139);
 -            for ($_i1143 = 0; $_i1143 < $_size1139; ++$_i1143)
++            $_size1153 = 0;
++            $_etype1156 = 0;
++            $xfer += $input->readListBegin($_etype1156, $_size1153);
++            for ($_i1157 = 0; $_i1157 < $_size1153; ++$_i1157)
              {
-               $elem1182 = null;
-               $xfer += $input->readString($elem1182);
-               $this->group_names []= $elem1182;
 -              $elem1144 = null;
 -              $xfer += $input->readString($elem1144);
 -              $this->group_names []= $elem1144;
++              $elem1158 = null;
++              $xfer += $input->readString($elem1158);
++              $this->group_names []= $elem1158;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -32747,9 -31968,9 +32695,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->part_vals));
          {
-           foreach ($this->part_vals as $iter1183)
 -          foreach ($this->part_vals as $iter1145)
++          foreach ($this->part_vals as $iter1159)
            {
-             $xfer += $output->writeString($iter1183);
 -            $xfer += $output->writeString($iter1145);
++            $xfer += $output->writeString($iter1159);
            }
          }
          $output->writeListEnd();
@@@ -32774,9 -31995,9 +32722,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->group_names));
          {
-           foreach ($this->group_names as $iter1184)
 -          foreach ($this->group_names as $iter1146)
++          foreach ($this->group_names as $iter1160)
            {
-             $xfer += $output->writeString($iter1184);
 -            $xfer += $output->writeString($iter1146);
++            $xfer += $output->writeString($iter1160);
            }
          }
          $output->writeListEnd();
@@@ -32865,15 -32086,15 +32813,15 @@@ class ThriftHiveMetastore_get_partition
          case 0:
            if ($ftype == TType::LST) {
              $this->success = array();
-             $_size1185 = 0;
-             $_etype1188 = 0;
-             $xfer += $input->readListBegin($_etype1188, $_size1185);
-             for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
 -            $_size1147 = 0;
 -            $_etype1150 = 0;
 -            $xfer += $input->readListBegin($_etype1150, $_size1147);
 -            for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151)
++            $_size1161 = 0;
++            $_etype1164 = 0;
++            $xfer += $input->readListBegin($_etype1164, $_size1161);
++            for ($_i1165 = 0; $_i1165 < $_size1161; ++$_i1165)
              {
-               $elem1190 = null;
-               $elem1190 = new \metastore\Partition();
-               $xfer += $elem1190->read($input);
-               $this->success []= $elem1190;
 -              $elem1152 = null;
 -              $elem1152 = new \metastore\Partition();
 -              $xfer += $elem1152->read($input);
 -              $this->success []= $elem1152;
++              $elem1166 = null;
++              $elem1166 = new \metastore\Partition();
++              $xfer += $elem1166->read($input);
++              $this->success []= $elem1166;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -32917,9 -32138,9 +32865,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->success));
          {
-           foreach ($this->success as $iter1191)
 -          foreach ($this->success as $iter1153)
++          foreach ($this->success as $iter1167)
            {
-             $xfer += $iter1191->write($output);
 -            $xfer += $iter1153->write($output);
++            $xfer += $iter1167->write($output);
            }
          }
          $output->writeListEnd();
@@@ -33040,14 -32261,14 +32988,14 @@@ class ThriftHiveMetastore_get_partition
          case 3:
            if ($ftype == TType::LST) {
              $this->part_vals = array();
-             $_size1192 = 0;
-             $_etype1195 = 0;
-             $xfer += $input->readListBegin($_etype1195, $_size1192);
-             for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
 -            $_size1154 = 0;
 -            $_etype115

<TRUNCATED>

[28/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1b5903b0
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1b5903b0
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1b5903b0

Branch: refs/heads/master-txnstats
Commit: 1b5903b035c3b3ac02efbddf36d5438cda97cc91
Parents: ab9e954
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Tue Jun 26 11:37:27 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Jul 13 23:06:53 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   26 +-
 .../hive/ql/exec/MaterializedViewTask.java      |    2 -
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  124 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |   21 +-
 ...terialized_view_create_rewrite_time_window.q |    4 +-
 .../clientpositive/druid/druidmini_mv.q.out     |   85 +-
 .../materialized_view_create_rewrite_5.q.out    |    4 +-
 ...alized_view_create_rewrite_time_window.q.out |   16 +-
 .../llap/materialized_view_rewrite_empty.q.out  |    4 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  | 2351 +++++++-------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |   52 +-
 .../ThriftHiveMetastore_server.skeleton.cpp     |    2 +-
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp | 1442 +++++----
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |   45 +-
 .../hive/metastore/api/CreationMetadata.java    |  111 +-
 .../metastore/api/FindSchemasByColsResp.java    |   36 +-
 .../hive/metastore/api/Materialization.java     |  409 +--
 .../hive/metastore/api/SchemaVersion.java       |   36 +-
 .../hive/metastore/api/ThriftHiveMetastore.java | 2858 +++++++++---------
 .../hive/metastore/api/WMFullResourcePlan.java  |  144 +-
 .../api/WMGetAllResourcePlanResponse.java       |   36 +-
 .../WMGetTriggersForResourePlanResponse.java    |   36 +-
 .../api/WMValidateResourcePlanResponse.java     |   64 +-
 .../gen-php/metastore/ThriftHiveMetastore.php   | 1510 +++++----
 .../src/gen/thrift/gen-php/metastore/Types.php  |  324 +-
 .../hive_metastore/ThriftHiveMetastore-remote   |    4 +-
 .../hive_metastore/ThriftHiveMetastore.py       | 1015 +++----
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  208 +-
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   16 +-
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   20 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   10 +-
 .../hive/metastore/HiveMetaStoreClient.java     |    7 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |    2 +-
 .../MaterializationsCacheCleanerTask.java       |   63 -
 .../MaterializationsInvalidationCache.java      |  543 ----
 .../MaterializationsRebuildLockCleanerTask.java |   30 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   20 +-
 .../hive/metastore/conf/MetastoreConf.java      |    6 +-
 .../hive/metastore/model/MCreationMetadata.java |   16 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   13 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  309 +-
 .../hadoop/hive/metastore/txn/TxnStore.java     |   38 +-
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   14 +-
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   14 +-
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   19 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   17 +-
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   18 +-
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   19 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   12 +-
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   12 +-
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   20 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   14 +-
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   14 +-
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |   19 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   14 +-
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   26 +-
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |   19 +
 .../src/main/thrift/hive_metastore.thrift       |    8 +-
 .../HiveMetaStoreClientPreCatalog.java          |    7 +-
 ...stMetaStoreMaterializationsCacheCleaner.java |  328 --
 .../TestTablesCreateDropAlterTruncate.java      |    1 +
 61 files changed, 5720 insertions(+), 6937 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 41fae36..858c630 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1633,13 +1633,13 @@ public class HiveConf extends Configuration {
             "choosing the plan with lower cost among possible plans containing a materialized view\n" +
             "  costbased: Fully cost-based strategy, always use plan with lower cost, independently on whether " +
             "it uses a materialized view or not"),
-    HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW("hive.materializedview.rewriting.time.window", "0s", new TimeValidator(TimeUnit.SECONDS),
+    HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW("hive.materializedview.rewriting.time.window", "0min", new TimeValidator(TimeUnit.MINUTES),
         "Time window, specified in seconds, after which outdated materialized views become invalid for automatic query rewriting.\n" +
-        "For instance, if a materialized view is created and afterwards one of its source tables is changed at " +
-        "moment in time t0, the materialized view will not be considered for rewriting anymore after t0 plus " +
-        "the value assigned to this property. Default value 0 means that the materialized view cannot be " +
-        "outdated to be used automatically in query rewriting."),
-    HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL("hive.materializedview.rewriting.incremental", true,
+        "For instance, if more time than the value assigned to the property has passed since the materialized view " +
+        "was created or rebuilt, and one of its source tables has changed since, the materialized view will not be " +
+        "considered for rewriting. Default value 0 means that the materialized view cannot be " +
+        "outdated to be used automatically in query rewriting. Value -1 means to skip this check."),
+    HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL("hive.materializedview.rewriting.incremental", false,
         "Whether to try to execute incremental rewritings based on outdated materializations and\n" +
         "current content of tables. Default value of true effectively amounts to enabling incremental\n" +
         "rebuild for the materializations too."),
@@ -1647,24 +1647,12 @@ public class HiveConf extends Configuration {
         "Whether to try to execute incremental rebuild for the materialized views. Incremental rebuild\n" +
         "tries to modify the original materialization contents to reflect the latest changes to the\n" +
         "materialized view source tables, instead of rebuilding the contents fully. Incremental rebuild\n" +
-        "is based on the materialized view algebraic incremental rewriting. Hence, this requires\n" +
-        "hive.materializedview.rewriting.incremental to be true."),
+        "is based on the materialized view algebraic incremental rewriting."),
     HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC",
         new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"),
         "Default file format for CREATE MATERIALIZED VIEW statement"),
     HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde",
         "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"),
-    HIVE_MATERIALIZATIONS_INVALIDATION_CACHE_IMPL("hive.metastore.materializations.invalidation.impl", "DEFAULT",
-        new StringSet("DEFAULT", "DISABLE"),
-        "The implementation that we should use for the materializations invalidation cache. \n" +
-            "  DEFAULT: Default implementation for invalidation cache\n" +
-            "  DISABLE: Disable invalidation cache (debugging purposes)"),
-    HIVE_MATERIALIZATIONS_INVALIDATION_CACHE_CLEAN_FREQUENCY("hive.metastore.materializations.invalidation.clean.frequency",
-        "3600s", new TimeValidator(TimeUnit.SECONDS), "Frequency at which timer task runs to remove unnecessary transactions information from" +
-        "materializations invalidation cache."),
-    HIVE_MATERIALIZATIONS_INVALIDATION_CACHE_EXPIRY_DURATION("hive.metastore.materializations.invalidation.max.duration",
-        "86400s", new TimeValidator(TimeUnit.SECONDS), "Maximum duration for query producing a materialization. After this time, transactions" +
-        "information that is not relevant for materializations can be removed from invalidation cache."),
 
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)
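
As an aside for readers of this patch (the sketch below is not part of the commit; the class and method names are invented for illustration), the new window value drives the staleness check that the Hive.java hunk later in this message introduces, roughly as follows:

    // Illustrative sketch of how hive.materializedview.rewriting.time.window is
    // interpreted after this patch:
    //   windowMs <  0  -> skip the time-based staleness check ("-1" in the new docs)
    //   windowMs == 0  -> always verify the view against the current write ids
    //   windowMs >  0  -> verify only once the materialization is older than the window
    public final class RewritingWindowSketch {
      static boolean shouldCompareWriteIds(long windowMs, long materializationTime,
          long currentTime, boolean forceMVContentsUpToDate) {
        if (windowMs < 0L) {
          // with a negative window only a forced rebuild treats the view as outdated
          return false;
        }
        return forceMVContentsUpToDate || windowMs == 0L
            || materializationTime < currentTime - windowMs;
      }
    }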

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
index 19aef6c..87828b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
@@ -19,9 +19,7 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
-import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverContext;

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index cd62d49..1fe1fb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -84,7 +84,9 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.hive.common.log.InPlaceUpdate;
@@ -193,6 +195,7 @@ import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hive.common.util.TxnIdUtils;
 import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -1332,7 +1335,7 @@ public class Hive {
    * @return the list of materialized views available for rewriting
    * @throws HiveException
    */
-  public List<RelOptMaterialization> getAllValidMaterializedViews(boolean forceMVContentsUpToDate, String validTxnsList)
+  public List<RelOptMaterialization> getAllValidMaterializedViews(List<String> tablesUsed, boolean forceMVContentsUpToDate)
       throws HiveException {
     // Final result
     List<RelOptMaterialization> result = new ArrayList<>();
@@ -1344,7 +1347,7 @@ public class Hive {
           // Bail out: empty list
           continue;
         }
-        result.addAll(getValidMaterializedViews(dbName, materializedViewNames, forceMVContentsUpToDate, validTxnsList));
+        result.addAll(getValidMaterializedViews(dbName, materializedViewNames, tablesUsed, forceMVContentsUpToDate));
       }
       return result;
     } catch (Exception e) {
@@ -1353,14 +1356,19 @@ public class Hive {
   }
 
   public List<RelOptMaterialization> getValidMaterializedView(String dbName, String materializedViewName,
-      boolean forceMVContentsUpToDate, String validTxnsList) throws HiveException {
-    return getValidMaterializedViews(dbName, ImmutableList.of(materializedViewName), forceMVContentsUpToDate, validTxnsList);
+      List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException {
+    return getValidMaterializedViews(dbName, ImmutableList.of(materializedViewName), tablesUsed, forceMVContentsUpToDate);
   }
 
   private List<RelOptMaterialization> getValidMaterializedViews(String dbName, List<String> materializedViewNames,
-      boolean forceMVContentsUpToDate, String validTxnsList) throws HiveException {
+      List<String> tablesUsed, boolean forceMVContentsUpToDate) throws HiveException {
+    final String validTxnsList = conf.get(ValidTxnList.VALID_TXNS_KEY);
+    final ValidTxnWriteIdList currentTxnWriteIds =
+        SessionState.get().getTxnMgr().getValidWriteIds(tablesUsed, validTxnsList);
     final boolean tryIncrementalRewriting =
         HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL);
+    final boolean tryIncrementalRebuild =
+        HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL);
     final long defaultDiff =
         HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW,
             TimeUnit.MILLISECONDS);
@@ -1369,8 +1377,6 @@ public class Hive {
       // Final result
       List<RelOptMaterialization> result = new ArrayList<>();
       List<Table> materializedViewTables = getTableObjects(dbName, materializedViewNames);
-      Map<String, Materialization> databaseInvalidationInfo =
-          getMSC().getMaterializationsInvalidationInfo(dbName, materializedViewNames);
       for (Table materializedViewTable : materializedViewTables) {
         // Check if materialization defined its own invalidation time window
         String timeWindowString = materializedViewTable.getProperty(MATERIALIZED_VIEW_REWRITING_TIME_WINDOW);
@@ -1378,7 +1384,7 @@ public class Hive {
             HiveConf.toTime(timeWindowString,
                 HiveConf.getDefaultTimeUnit(HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW),
                 TimeUnit.MILLISECONDS);
-        Materialization materializationInvInfo = null;
+        CreationMetadata creationMetadata = materializedViewTable.getCreationMetadata();
         boolean outdated = false;
         if (diff < 0L) {
           // We only consider the materialized view to be outdated if forceOutdated = true, i.e.,
@@ -1386,40 +1392,80 @@ public class Hive {
           outdated = forceMVContentsUpToDate;
         } else {
           // Check whether the materialized view is invalidated
-          materializationInvInfo =
-              databaseInvalidationInfo.get(materializedViewTable.getTableName());
-          if (materializationInvInfo == null) {
-            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
-                " ignored for rewriting as there was no information loaded in the invalidation cache");
-            continue;
-          }
-          long invalidationTime = materializationInvInfo.getInvalidationTime();
-          if (invalidationTime == Long.MIN_VALUE) {
-            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
-                " ignored for rewriting as it contains non-transactional tables");
-            continue;
-          }
-          // If the limit is not met, we do not add the materialized view.
-          // If we are doing a rebuild, we do not consider outdated materialized views either.
-          if (diff == 0L || forceMVContentsUpToDate) {
-            if (invalidationTime != 0L) {
-              outdated = true;
+          if (forceMVContentsUpToDate || diff == 0L || creationMetadata.getMaterializationTime() < currentTime - diff) {
+            if (currentTxnWriteIds == null) {
+              LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                  " ignored for rewriting as we could not obtain current txn ids");
+              continue;
             }
-          } else {
-            if (invalidationTime != 0L && invalidationTime > currentTime - diff) {
-              outdated = true;
+            if (creationMetadata.getValidTxnList() == null ||
+                creationMetadata.getValidTxnList().isEmpty()) {
+              LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                  " ignored for rewriting as we could not obtain materialization txn ids");
+              continue;
+            }
+            boolean ignore = false;
+            ValidTxnWriteIdList mvTxnWriteIds = new ValidTxnWriteIdList(
+                creationMetadata.getValidTxnList());
+            for (String qName : tablesUsed) {
+              // Note. If the materialized view does not contain a table that is contained in the query,
+              // we do not need to check whether that specific table is outdated or not. If a rewriting
+              // is produced in those cases, it is because that additional table is joined with the
+              // existing tables with an append-columns only join, i.e., PK-FK + not null.
+              if (!creationMetadata.getTablesUsed().contains(qName)) {
+                continue;
+              }
+              ValidWriteIdList tableCurrentWriteIds = currentTxnWriteIds.getTableValidWriteIdList(qName);
+              if (tableCurrentWriteIds == null) {
+                // Uses non-transactional table, cannot be considered
+                LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                    " ignored for rewriting as it is outdated and cannot be considered for " +
+                    " rewriting because it uses non-transactional table " + qName);
+                ignore = true;
+                break;
+              }
+              ValidWriteIdList tableWriteIds = mvTxnWriteIds.getTableValidWriteIdList(qName);
+              if (tableWriteIds == null) {
+                // This should not happen, but we ignore for safety
+                LOG.warn("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                    " ignored for rewriting as details about txn ids for table " + qName +
+                    " could not be found in " + mvTxnWriteIds);
+                ignore = true;
+                break;
+              }
+              if (!outdated && !TxnIdUtils.checkEquivalentWriteIds(tableCurrentWriteIds, tableWriteIds)) {
+                LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                    " contents are outdated");
+                outdated = true;
+              }
+            }
+            if (ignore) {
+              continue;
             }
           }
         }
 
-        if (outdated && (!tryIncrementalRewriting || materializationInvInfo == null
-            || validTxnsList == null || materializationInvInfo.isSourceTablesUpdateDeleteModified())) {
-          // We will not try partial rewriting either because the config specification, this
-          // is a rebuild over some non-transactional table, or there were update/delete
-          // operations in the source tables (not supported yet)
-          LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
-              " ignored for rewriting as its contents are outdated");
-          continue;
+        if (outdated) {
+          // The MV is outdated, see whether we should consider it for rewriting or not
+          boolean ignore = false;
+          if (forceMVContentsUpToDate && !tryIncrementalRebuild) {
+            // We will not try partial rewriting for rebuild if incremental rebuild is disabled
+            ignore = true;
+          } else if (!forceMVContentsUpToDate && !tryIncrementalRewriting) {
+            // We will not try partial rewriting for non-rebuild if incremental rewriting is disabled
+            ignore = true;
+          } else {
+            // Obtain additional information if we should try incremental rewriting / rebuild
+            // We will not try partial rewriting if there were update/delete operations on source tables
+            Materialization invalidationInfo = getMSC().getMaterializationInvalidationInfo(
+                creationMetadata, conf.get(ValidTxnList.VALID_TXNS_KEY));
+            ignore = invalidationInfo == null || invalidationInfo.isSourceTablesUpdateDeleteModified();
+          }
+          if (ignore) {
+            LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
+                " ignored for rewriting as its contents are outdated");
+            continue;
+          }
         }
 
         // It passed the test, load
@@ -1443,7 +1489,7 @@ public class Hive {
               // so we can produce partial rewritings
               materialization = augmentMaterializationWithTimeInformation(
                   materialization, validTxnsList, new ValidTxnWriteIdList(
-                      materializationInvInfo.getValidTxnList()));
+                      creationMetadata.getValidTxnList()));
             }
             result.add(materialization);
             continue;
@@ -1466,7 +1512,7 @@ public class Hive {
               // so we can produce partial rewritings
               materialization = augmentMaterializationWithTimeInformation(
                   materialization, validTxnsList, new ValidTxnWriteIdList(
-                      materializationInvInfo.getValidTxnList()));
+                      creationMetadata.getValidTxnList()));
             }
             result.add(materialization);
           }
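
The heart of the freshness test added above is a per-table comparison of write id lists. A condensed sketch (simplified from the hunk above: logging and the ignore/outdated distinction are collapsed into a single boolean, and the helper name is invented), using only classes and calls that appear in this patch:

    import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;
    import org.apache.hive.common.util.TxnIdUtils;

    public final class MaterializationFreshnessSketch {
      // Returns true when every table shared by the query and the materialized view
      // has equivalent write ids in the current snapshot and in the view's creation
      // metadata, i.e. the view contents can still be used as-is.
      static boolean contentsUpToDate(ValidTxnWriteIdList currentTxnWriteIds,
          ValidTxnWriteIdList mvTxnWriteIds, Iterable<String> sharedTables) {
        for (String qName : sharedTables) {
          ValidWriteIdList current = currentTxnWriteIds.getTableValidWriteIdList(qName);
          ValidWriteIdList atCreation = mvTxnWriteIds.getTableValidWriteIdList(qName);
          if (current == null || atCreation == null) {
            // non-transactional table or missing metadata: the view cannot be considered
            return false;
          }
          if (!TxnIdUtils.checkEquivalentWriteIds(current, atCreation)) {
            // the table changed since the view was built or last rebuilt
            return false;
          }
        }
        return true;
      }
    }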

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index e091f38..fecfd0c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -2081,18 +2081,17 @@ public class CalcitePlanner extends SemanticAnalyzer {
       // Add views to planner
       List<RelOptMaterialization> materializations = new ArrayList<>();
       try {
-        final String validTxnsList = conf.get(ValidTxnList.VALID_TXNS_KEY);
         if (mvRebuildMode != MaterializationRebuildMode.NONE) {
           // We only retrieve the materialization corresponding to the rebuild. In turn,
           // we pass 'true' for the forceMVContentsUpToDate parameter, as we cannot allow the
           // materialization contents to be stale for a rebuild if we want to use it.
           materializations = Hive.get().getValidMaterializedView(mvRebuildDbName, mvRebuildName,
-              true, validTxnsList);
+              getTablesUsed(basePlan), true);
         } else {
           // This is not a rebuild, we retrieve all the materializations. In turn, we do not need
           // to force the materialization contents to be up-to-date, as this is not a rebuild, and
           // we apply the user parameters (HIVE_MATERIALIZED_VIEW_REWRITING_TIME_WINDOW) instead.
-          materializations = Hive.get().getAllValidMaterializedViews(false, validTxnsList);
+          materializations = Hive.get().getAllValidMaterializedViews(getTablesUsed(basePlan), false);
         }
         // We need to use the current cluster for the scan operator on views,
         // otherwise the planner will throw an Exception (different planners)
@@ -2169,7 +2168,6 @@ public class CalcitePlanner extends SemanticAnalyzer {
           // A rewriting was produced, we will check whether it was part of an incremental rebuild
           // to try to replace INSERT OVERWRITE by INSERT
           if (mvRebuildMode == MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD &&
-              HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REWRITING_INCREMENTAL) &&
               HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_REBUILD_INCREMENTAL)) {
             // First we need to check if it is valid to convert to MERGE/INSERT INTO.
             // If we succeed, we modify the plan and afterwards the AST.
@@ -2196,6 +2194,21 @@ public class CalcitePlanner extends SemanticAnalyzer {
       return basePlan;
     }
 
+    private List<String> getTablesUsed(RelNode plan) {
+      List<String> tablesUsed = new ArrayList<>();
+      new RelVisitor() {
+        @Override
+        public void visit(RelNode node, int ordinal, RelNode parent) {
+          if (node instanceof TableScan) {
+            TableScan ts = (TableScan) node;
+            tablesUsed.add(((RelOptHiveTable) ts.getTable()).getHiveTableMD().getFullyQualifiedName());
+          }
+          super.visit(node, ordinal, parent);
+        }
+      }.go(plan);
+      return tablesUsed;
+    }
+
     /**
      * Run the HEP Planner with the given rule set.
      *

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q
index c61730e..55c6c04 100644
--- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q
+++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_time_window.q
@@ -25,13 +25,13 @@ analyze table cmv_basetable_2_n1 compute statistics for columns;
 
 -- CREATE VIEW WITH REWRITE DISABLED
 EXPLAIN
-CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0
   GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c;
 
-CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
index 383f2dc..54a4ef6 100644
--- a/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
+++ b/ql/src/test/results/clientpositive/druid/druidmini_mv.q.out
@@ -162,31 +162,35 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: cmv_basetable_n2
-          filterExpr: (a = 3) (type: boolean)
-          Filter Operator
-            predicate: (a = 3) (type: boolean)
-            Select Operator
-              expressions: 3 (type: int), c (type: double)
-              outputColumnNames: _col0, _col1
-              ListSink
+          alias: cmv_mat_view2_n0
+          properties:
+            druid.fieldNames vc,c
+            druid.fieldTypes int,double
+            druid.query.json {"queryType":"scan","dataSource":"default.cmv_mat_view2_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"virtualColumns":[{"type":"expression","name":"vc","expression":"3","outputType":"LONG"}],"columns":["vc","c"],"resultFormat":"compactedList"}
+            druid.query.type scan
+          Select Operator
+            expressions: vc (type: int), c (type: double)
+            outputColumnNames: _col0, _col1
+            ListSink
 
 PREHOOK: query: SELECT a, c
 FROM cmv_basetable_n2
 WHERE a = 3
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cmv_basetable_n2
+PREHOOK: Input: default@cmv_mat_view2_n0
 PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: SELECT a, c
 FROM cmv_basetable_n2
 WHERE a = 3
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cmv_basetable_n2
+POSTHOOK: Input: default@cmv_mat_view2_n0
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-3	15.8
-3	9.8
-3	978.76
-Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+3	15.800000190734863
+3	9.800000190734863
+3	978.760009765625
+Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT * FROM (
   (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1
@@ -217,36 +221,33 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: cmv_basetable_n2
-                  filterExpr: (a = 3) (type: boolean)
-                  Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: (a = 3) (type: boolean)
-                    Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
-                    Select Operator
-                      expressions: c (type: double)
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        sort order: 
-                        Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: double)
-        Map 3 
-            Map Operator Tree:
-                TableScan
-                  alias: cmv_basetable_n2
-                  filterExpr: ((d = 3) and (a = 3)) (type: boolean)
+                  filterExpr: ((a = 3) and (d = 3)) (type: boolean)
                   Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((a = 3) and (d = 3)) (type: boolean)
                     Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: c (type: double)
-                      outputColumnNames: _col0
+                      outputColumnNames: _col1
                       Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order: 
                         Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: NONE
-                        value expressions: _col0 (type: double)
+                        value expressions: _col1 (type: double)
+        Map 3 
+            Map Operator Tree:
+                TableScan
+                  alias: cmv_mat_view2_n0
+                  properties:
+                    druid.fieldNames a,c
+                    druid.fieldTypes int,double
+                    druid.query.json {"queryType":"scan","dataSource":"default.cmv_mat_view2_n0","intervals":["1900-01-01T00:00:00.000Z/3000-01-01T00:00:00.000Z"],"columns":["a","c"],"resultFormat":"compactedList"}
+                    druid.query.type scan
+                  Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 3 Data size: 36 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: a (type: int), c (type: double)
         Reducer 2 
             Reduce Operator Tree:
               Merge Join Operator
@@ -255,15 +256,15 @@ STAGE PLANS:
                 keys:
                   0 
                   1 
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                outputColumnNames: _col0, _col1, _col6
+                Statistics: Num rows: 18 Data size: 522 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  expressions: 3 (type: int), _col0 (type: double), 3 (type: int), _col1 (type: double)
+                  expressions: _col0 (type: int), _col1 (type: double), _col0 (type: int), _col6 (type: double)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                  Statistics: Num rows: 18 Data size: 522 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 36 Data size: 1044 Basic stats: COMPLETE Column stats: NONE
+                    Statistics: Num rows: 18 Data size: 522 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -275,7 +276,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[13][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[10][tables = [cmv_mat_view2_n0, $hdt$_0]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: SELECT * FROM (
   (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1
   JOIN
@@ -283,6 +284,7 @@ PREHOOK: query: SELECT * FROM (
   ON table1.a = table2.a)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@cmv_basetable_n2
+PREHOOK: Input: default@cmv_mat_view2_n0
 PREHOOK: Output: hdfs://### HDFS PATH ###
 POSTHOOK: query: SELECT * FROM (
   (SELECT a, c FROM cmv_basetable_n2 WHERE a = 3) table1
@@ -291,10 +293,11 @@ POSTHOOK: query: SELECT * FROM (
   ON table1.a = table2.a)
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@cmv_basetable_n2
+POSTHOOK: Input: default@cmv_mat_view2_n0
 POSTHOOK: Output: hdfs://### HDFS PATH ###
-3	15.8	3	978.76
-3	9.8	3	978.76
-3	978.76	3	978.76
+3	15.800000190734863	3	978.76
+3	9.800000190734863	3	978.76
+3	978.760009765625	3	978.76
 PREHOOK: query: INSERT INTO cmv_basetable_n2 VALUES
  (cast(current_timestamp() AS timestamp), 3, 'charlie', 'charlie_c', 15.8, 1)
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
index 2c4ee3d..99832ff 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
@@ -180,12 +180,12 @@ STAGE PLANS:
                     Select Operator
                       expressions: a (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: llap
             LLAP IO: may be used (ACID table)
         Reducer 2 

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out
index 44a866d..68e7500 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_time_window.q.out
@@ -73,14 +73,14 @@ POSTHOOK: Input: default@cmv_basetable_2_n1
 POSTHOOK: Output: default@cmv_basetable_2_n1
 #### A masked pattern was here ####
 PREHOOK: query: EXPLAIN
-CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0
   GROUP BY cmv_basetable_n3.a, cmv_basetable_2_n1.c
 PREHOOK: type: CREATE_MATERIALIZED_VIEW
 POSTHOOK: query: EXPLAIN
-CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0
@@ -190,7 +190,7 @@ STAGE PLANS:
         Create View
           columns: a int, c decimal(10,2)
           table properties:
-            rewriting.time.window 300s
+            rewriting.time.window 5min
           expanded text: SELECT `cmv_basetable_n3`.`a`, `cmv_basetable_2_n1`.`c`
   FROM `default`.`cmv_basetable_n3` JOIN `default`.`cmv_basetable_2_n1` ON (`cmv_basetable_n3`.`a` = `cmv_basetable_2_n1`.`a`)
   WHERE `cmv_basetable_2_n1`.`c` > 10.0
@@ -214,7 +214,7 @@ STAGE PLANS:
           hdfs directory: true
 #### A masked pattern was here ####
 
-PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0
@@ -224,7 +224,7 @@ PREHOOK: Input: default@cmv_basetable_2_n1
 PREHOOK: Input: default@cmv_basetable_n3
 PREHOOK: Output: database:default
 PREHOOK: Output: default@cmv_mat_view_n3
-POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='300s') AS
+POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n3 DISABLE REWRITE TBLPROPERTIES('rewriting.time.window'='5min') AS
   SELECT cmv_basetable_n3.a, cmv_basetable_2_n1.c
   FROM cmv_basetable_n3 JOIN cmv_basetable_2_n1 ON (cmv_basetable_n3.a = cmv_basetable_2_n1.a)
   WHERE cmv_basetable_2_n1.c > 10.0
@@ -256,7 +256,7 @@ Table Parameters:
 	numFiles            	2                   
 	numRows             	2                   
 	rawDataSize         	232                 
-	rewriting.time.window	300s                
+	rewriting.time.window	5min                
 	totalSize           	608                 
 #### A masked pattern was here ####
 	 	 
@@ -480,7 +480,7 @@ Table Parameters:
 	numFiles            	2                   
 	numRows             	2                   
 	rawDataSize         	232                 
-	rewriting.time.window	300s                
+	rewriting.time.window	5min                
 	totalSize           	608                 
 #### A masked pattern was here ####
 	 	 
@@ -786,7 +786,7 @@ Table Parameters:
 	numFiles            	2                   
 	numRows             	3                   
 	rawDataSize         	348                 
-	rewriting.time.window	300s                
+	rewriting.time.window	5min                
 	totalSize           	628                 
 #### A masked pattern was here ####
 	 	 

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_empty.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_empty.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_empty.q.out
index 2e6f00c..ac8cc35 100644
--- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_empty.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_empty.q.out
@@ -55,7 +55,7 @@ STAGE PLANS:
       limit: -1
       Processor Tree:
         TableScan
-          alias: emps_mv_rewrite_empty
+          alias: default.emps_mv_rewrite_empty_mv1
           filterExpr: (empid < 120) (type: boolean)
           Filter Operator
             predicate: (empid < 120) (type: boolean)
@@ -67,10 +67,12 @@ STAGE PLANS:
 PREHOOK: query: select * from emps_mv_rewrite_empty where empid < 120
 PREHOOK: type: QUERY
 PREHOOK: Input: default@emps_mv_rewrite_empty
+PREHOOK: Input: default@emps_mv_rewrite_empty_mv1
 #### A masked pattern was here ####
 POSTHOOK: query: select * from emps_mv_rewrite_empty where empid < 120
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@emps_mv_rewrite_empty
+POSTHOOK: Input: default@emps_mv_rewrite_empty_mv1
 #### A masked pattern was here ####
 PREHOOK: query: drop materialized view emps_mv_rewrite_empty_mv1
 PREHOOK: type: DROP_MATERIALIZED_VIEW


[39/48] hive git commit: HIVE-20152: reset db state, when repl dump fails, so rename table can be done (Anishek Agarwal, reviewed by Sankar Hariappan)

Posted by se...@apache.org.
HIVE-20152: reset db state, when repl dump fails, so rename table can be done (Anishek Agarwal, reviewed by Sankar Hariappan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c1337dfb
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c1337dfb
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c1337dfb

Branch: refs/heads/master-txnstats
Commit: c1337dfb73f0df6f9cd9f9ed7257917e37b38745
Parents: dceeefb
Author: Anishek Agarwal <an...@gmail.com>
Authored: Tue Jul 17 19:39:16 2018 +0530
Committer: Anishek Agarwal <an...@gmail.com>
Committed: Tue Jul 17 19:39:16 2018 +0530

----------------------------------------------------------------------
 .../hadoop/hive/ql/exec/repl/ReplDumpTask.java  |  52 +++++---
 .../hadoop/hive/ql/parse/repl/dump/Utils.java   |   4 +-
 .../hive/ql/exec/repl/ReplDumpTaskTest.java     | 126 +++++++++++++++++++
 3 files changed, 166 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/c1337dfb/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
index 82ecad1..79ee80a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.log.IncrementalDumpLogger;
 import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData;
 import org.apache.hadoop.hive.ql.plan.ExportWork.MmContext;
 import org.apache.hadoop.hive.ql.plan.api.StageType;
+import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -216,10 +217,10 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     return rspec;
   }
 
-  private Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot) throws Exception {
+  Long bootStrapDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot) throws Exception {
     // bootstrap case
     Hive hiveDb = getHive();
-    Long bootDumpBeginReplId = hiveDb.getMSC().getCurrentNotificationEventId().getEventId();
+    Long bootDumpBeginReplId = currentNotificationId(hiveDb);
     String validTxnList = getValidTxnListForReplDump(hiveDb);
     for (String dbName : Utils.matchesDb(hiveDb, work.dbNameOrPattern)) {
       LOG.debug("ReplicationSemanticAnalyzer: analyzeReplDump dumping db: " + dbName);
@@ -231,16 +232,35 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
       dumpFunctionMetadata(dbName, dumpRoot);
 
       String uniqueKey = Utils.setDbBootstrapDumpState(hiveDb, dbName);
-      for (String tblName : Utils.matchesTbl(hiveDb, dbName, work.tableNameOrPattern)) {
-        LOG.debug(
-            "analyzeReplDump dumping table: " + tblName + " to db root " + dbRoot.toUri());
-        dumpTable(dbName, tblName, validTxnList, dbRoot);
-        dumpConstraintMetadata(dbName, tblName, dbRoot);
+      Exception caught = null;
+      try {
+        for (String tblName : Utils.matchesTbl(hiveDb, dbName, work.tableNameOrPattern)) {
+          LOG.debug(
+              "analyzeReplDump dumping table: " + tblName + " to db root " + dbRoot.toUri());
+          dumpTable(dbName, tblName, validTxnList, dbRoot);
+          dumpConstraintMetadata(dbName, tblName, dbRoot);
+        }
+      } catch (Exception e) {
+        caught = e;
+      } finally {
+        try {
+          Utils.resetDbBootstrapDumpState(hiveDb, dbName, uniqueKey);
+        } catch (Exception e) {
+          if (caught == null) {
+            throw e;
+          } else {
+            LOG.error("failed to reset the db state for " + uniqueKey
+                + " on failure of repl dump", e);
+            throw caught;
+          }
+        }
+        if(caught != null) {
+          throw caught;
+        }
       }
-      Utils.resetDbBootstrapDumpState(hiveDb, dbName, uniqueKey);
       replLogger.endLog(bootDumpBeginReplId.toString());
     }
-    Long bootDumpEndReplId = hiveDb.getMSC().getCurrentNotificationEventId().getEventId();
+    Long bootDumpEndReplId = currentNotificationId(hiveDb);
     LOG.info("Bootstrap object dump phase took from {} to {}", bootDumpBeginReplId,
         bootDumpEndReplId);
 
@@ -274,7 +294,11 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     return bootDumpBeginReplId;
   }
 
-  private Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId) throws Exception {
+  long currentNotificationId(Hive hiveDb) throws TException {
+    return hiveDb.getMSC().getCurrentNotificationEventId().getEventId();
+  }
+
+  Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId) throws Exception {
     Path dbRoot = new Path(dumpRoot, dbName);
     // TODO : instantiating FS objects are generally costly. Refactor
     FileSystem fs = dbRoot.getFileSystem(conf);
@@ -284,7 +308,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     return dbRoot;
   }
 
-  private void dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot) throws Exception {
+  void dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot) throws Exception {
     try {
       Hive db = getHive();
       HiveWrapper.Tuple<Table> tuple = new HiveWrapper(db, dbName).table(tblName);
@@ -331,7 +355,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     return openTxns;
   }
 
-  private String getValidTxnListForReplDump(Hive hiveDb) throws HiveException {
+  String getValidTxnListForReplDump(Hive hiveDb) throws HiveException {
     // Key design point for REPL DUMP is to not have any txns older than current txn in which dump runs.
     // This is needed to ensure that Repl dump doesn't copy any data files written by any open txns
     // mainly for streaming ingest case where one delta file shall have data from committed/aborted/open txns.
@@ -396,7 +420,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     }
   }
 
-  private void dumpFunctionMetadata(String dbName, Path dumpRoot) throws Exception {
+  void dumpFunctionMetadata(String dbName, Path dumpRoot) throws Exception {
     Path functionsRoot = new Path(new Path(dumpRoot, dbName), FUNCTIONS_ROOT_DIR_NAME);
     List<String> functionNames = getHive().getFunctions(dbName, "*");
     for (String functionName : functionNames) {
@@ -415,7 +439,7 @@ public class ReplDumpTask extends Task<ReplDumpWork> implements Serializable {
     }
   }
 
-  private void dumpConstraintMetadata(String dbName, String tblName, Path dbRoot) throws Exception {
+  void dumpConstraintMetadata(String dbName, String tblName, Path dbRoot) throws Exception {
     try {
       Path constraintsRoot = new Path(dbRoot, CONSTRAINTS_ROOT_DIR_NAME);
       Path commonConstraintsFile = new Path(constraintsRoot, ConstraintFileType.COMMON.getPrefix() + tblName);
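
The bootStrapDump() change above guarantees that the bootstrap-dump marker written by Utils.setDbBootstrapDumpState() is cleared even when dumping a table fails, while the first failure remains the one that is reported. A minimal stand-alone sketch of that reset-in-finally pattern; ResetOnFailure, Step and runAndAlwaysReset are illustrative names chosen for this sketch, not part of Hive:

    class ResetOnFailure {
      interface Step { void run() throws Exception; }

      // Sketch only: run an action, always attempt a state reset afterwards,
      // and make sure the original failure is the one that surfaces.
      static void runAndAlwaysReset(Step action, Step reset) throws Exception {
        Exception caught = null;
        try {
          action.run();
        } catch (Exception e) {
          caught = e;
        } finally {
          try {
            reset.run();            // e.g. clearing the bootstrap-dump marker on the database
          } catch (Exception resetFailure) {
            if (caught == null) {
              throw resetFailure;   // only the reset failed, so report that
            }
            throw caught;           // prefer the original failure over the reset failure
          }
          if (caught != null) {
            throw caught;           // reset succeeded, re-throw the original failure
          }
        }
      }
    }

In the committed code the reset failure is additionally logged before the original exception is re-thrown, which is what the new ReplDumpTaskTest below verifies via a mocked static call.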

http://git-wip-us.apache.org/repos/asf/hive/blob/c1337dfb/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
index 62d699f..59ffb90 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/Utils.java
@@ -75,7 +75,7 @@ public class Utils {
     }
   }
 
-  public static Iterable<? extends String> matchesDb(Hive db, String dbPattern) throws HiveException {
+  public static Iterable<String> matchesDb(Hive db, String dbPattern) throws HiveException {
     if (dbPattern == null) {
       return db.getAllDatabases();
     } else {
@@ -83,7 +83,7 @@ public class Utils {
     }
   }
 
-  public static Iterable<? extends String> matchesTbl(Hive db, String dbName, String tblPattern)
+  public static Iterable<String> matchesTbl(Hive db, String dbName, String tblPattern)
       throws HiveException {
     if (tblPattern == null) {
       return getAllTables(db, dbName);

http://git-wip-us.apache.org/repos/asf/hive/blob/c1337dfb/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTaskTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTaskTest.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTaskTest.java
new file mode 100644
index 0000000..7bd035e
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTaskTest.java
@@ -0,0 +1,126 @@
+  /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.exec.repl;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.parse.repl.dump.Utils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
+import org.powermock.core.classloader.annotations.PrepareForTest;
+import org.powermock.modules.junit4.PowerMockRunner;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.same;
+import static org.mockito.Mockito.mock;
+import static org.powermock.api.mockito.PowerMockito.mockStatic;
+import static org.powermock.api.mockito.PowerMockito.verifyStatic;
+import static org.powermock.api.mockito.PowerMockito.when;
+
+@RunWith(PowerMockRunner.class)
+@PrepareForTest({ Utils.class })
+@PowerMockIgnore({ "javax.management.*" })
+public class ReplDumpTaskTest {
+
+  @Mock
+  private Hive hive;
+
+  class StubReplDumpTask extends ReplDumpTask {
+
+    @Override
+    protected Hive getHive() {
+      return hive;
+    }
+
+    @Override
+    long currentNotificationId(Hive hiveDb) {
+      return Long.MAX_VALUE;
+    }
+
+    @Override
+    String getValidTxnListForReplDump(Hive hiveDb) {
+      return "";
+    }
+
+    @Override
+    void dumpFunctionMetadata(String dbName, Path dumpRoot) {
+    }
+
+    @Override
+    Path dumpDbMetadata(String dbName, Path dumpRoot, long lastReplId) {
+      return Mockito.mock(Path.class);
+    }
+
+    @Override
+    void dumpConstraintMetadata(String dbName, String tblName, Path dbRoot) {
+    }
+  }
+
+  private static class TestException extends Exception {
+  }
+
+  @Test(expected = TestException.class)
+  public void removeDBPropertyToPreventRenameWhenBootstrapDumpOfTableFails() throws Exception {
+    List<String> tableList = Arrays.asList("a1", "a2");
+    String dbRandomKey = "akeytoberandom";
+
+    mockStatic(Utils.class);
+    when(Utils.matchesDb(same(hive), eq("default")))
+        .thenReturn(Collections.singletonList("default"));
+    when(Utils.getAllTables(same(hive), eq("default"))).thenReturn(tableList);
+    when(Utils.setDbBootstrapDumpState(same(hive), eq("default"))).thenReturn(dbRandomKey);
+    when(Utils.matchesTbl(same(hive), eq("default"), anyString())).thenReturn(tableList);
+
+
+    when(hive.getAllFunctions()).thenReturn(Collections.emptyList());
+
+    ReplDumpTask task = new StubReplDumpTask() {
+      private int tableDumpCount = 0;
+
+      @Override
+      void dumpTable(String dbName, String tblName, String validTxnList, Path dbRoot)
+          throws Exception {
+        tableDumpCount++;
+        if (tableDumpCount > 1) {
+          throw new TestException();
+        }
+      }
+    };
+
+    task.setWork(
+        new ReplDumpWork("default", "",
+            Long.MAX_VALUE, Long.MAX_VALUE, "",
+            Integer.MAX_VALUE, "")
+    );
+
+    try {
+      task.bootStrapDump(mock(Path.class), null, mock(Path.class));
+    } finally {
+      verifyStatic();
+      Utils.resetDbBootstrapDumpState(same(hive), eq("default"), eq(dbRandomKey));
+    }
+  }
+}


[27/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
index a816ae7..9d57d4c 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -2334,14 +2334,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1226;
-            ::apache::thrift::protocol::TType _etype1229;
-            xfer += iprot->readListBegin(_etype1229, _size1226);
-            this->success.resize(_size1226);
-            uint32_t _i1230;
-            for (_i1230 = 0; _i1230 < _size1226; ++_i1230)
+            uint32_t _size1219;
+            ::apache::thrift::protocol::TType _etype1222;
+            xfer += iprot->readListBegin(_etype1222, _size1219);
+            this->success.resize(_size1219);
+            uint32_t _i1223;
+            for (_i1223 = 0; _i1223 < _size1219; ++_i1223)
             {
-              xfer += iprot->readString(this->success[_i1230]);
+              xfer += iprot->readString(this->success[_i1223]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2380,10 +2380,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1231;
-      for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231)
+      std::vector<std::string> ::const_iterator _iter1224;
+      for (_iter1224 = this->success.begin(); _iter1224 != this->success.end(); ++_iter1224)
       {
-        xfer += oprot->writeString((*_iter1231));
+        xfer += oprot->writeString((*_iter1224));
       }
       xfer += oprot->writeListEnd();
     }
@@ -2428,14 +2428,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1232;
-            ::apache::thrift::protocol::TType _etype1235;
-            xfer += iprot->readListBegin(_etype1235, _size1232);
-            (*(this->success)).resize(_size1232);
-            uint32_t _i1236;
-            for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
+            uint32_t _size1225;
+            ::apache::thrift::protocol::TType _etype1228;
+            xfer += iprot->readListBegin(_etype1228, _size1225);
+            (*(this->success)).resize(_size1225);
+            uint32_t _i1229;
+            for (_i1229 = 0; _i1229 < _size1225; ++_i1229)
             {
-              xfer += iprot->readString((*(this->success))[_i1236]);
+              xfer += iprot->readString((*(this->success))[_i1229]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2552,14 +2552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1237;
-            ::apache::thrift::protocol::TType _etype1240;
-            xfer += iprot->readListBegin(_etype1240, _size1237);
-            this->success.resize(_size1237);
-            uint32_t _i1241;
-            for (_i1241 = 0; _i1241 < _size1237; ++_i1241)
+            uint32_t _size1230;
+            ::apache::thrift::protocol::TType _etype1233;
+            xfer += iprot->readListBegin(_etype1233, _size1230);
+            this->success.resize(_size1230);
+            uint32_t _i1234;
+            for (_i1234 = 0; _i1234 < _size1230; ++_i1234)
             {
-              xfer += iprot->readString(this->success[_i1241]);
+              xfer += iprot->readString(this->success[_i1234]);
             }
             xfer += iprot->readListEnd();
           }
@@ -2598,10 +2598,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1242;
-      for (_iter1242 = this->success.begin(); _iter1242 != this->success.end(); ++_iter1242)
+      std::vector<std::string> ::const_iterator _iter1235;
+      for (_iter1235 = this->success.begin(); _iter1235 != this->success.end(); ++_iter1235)
       {
-        xfer += oprot->writeString((*_iter1242));
+        xfer += oprot->writeString((*_iter1235));
       }
       xfer += oprot->writeListEnd();
     }
@@ -2646,14 +2646,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1243;
-            ::apache::thrift::protocol::TType _etype1246;
-            xfer += iprot->readListBegin(_etype1246, _size1243);
-            (*(this->success)).resize(_size1243);
-            uint32_t _i1247;
-            for (_i1247 = 0; _i1247 < _size1243; ++_i1247)
+            uint32_t _size1236;
+            ::apache::thrift::protocol::TType _etype1239;
+            xfer += iprot->readListBegin(_etype1239, _size1236);
+            (*(this->success)).resize(_size1236);
+            uint32_t _i1240;
+            for (_i1240 = 0; _i1240 < _size1236; ++_i1240)
             {
-              xfer += iprot->readString((*(this->success))[_i1247]);
+              xfer += iprot->readString((*(this->success))[_i1240]);
             }
             xfer += iprot->readListEnd();
           }
@@ -3715,17 +3715,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->success.clear();
-            uint32_t _size1248;
-            ::apache::thrift::protocol::TType _ktype1249;
-            ::apache::thrift::protocol::TType _vtype1250;
-            xfer += iprot->readMapBegin(_ktype1249, _vtype1250, _size1248);
-            uint32_t _i1252;
-            for (_i1252 = 0; _i1252 < _size1248; ++_i1252)
+            uint32_t _size1241;
+            ::apache::thrift::protocol::TType _ktype1242;
+            ::apache::thrift::protocol::TType _vtype1243;
+            xfer += iprot->readMapBegin(_ktype1242, _vtype1243, _size1241);
+            uint32_t _i1245;
+            for (_i1245 = 0; _i1245 < _size1241; ++_i1245)
             {
-              std::string _key1253;
-              xfer += iprot->readString(_key1253);
-              Type& _val1254 = this->success[_key1253];
-              xfer += _val1254.read(iprot);
+              std::string _key1246;
+              xfer += iprot->readString(_key1246);
+              Type& _val1247 = this->success[_key1246];
+              xfer += _val1247.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -3764,11 +3764,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
     {
       xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Type> ::const_iterator _iter1255;
-      for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255)
+      std::map<std::string, Type> ::const_iterator _iter1248;
+      for (_iter1248 = this->success.begin(); _iter1248 != this->success.end(); ++_iter1248)
       {
-        xfer += oprot->writeString(_iter1255->first);
-        xfer += _iter1255->second.write(oprot);
+        xfer += oprot->writeString(_iter1248->first);
+        xfer += _iter1248->second.write(oprot);
       }
       xfer += oprot->writeMapEnd();
     }
@@ -3813,17 +3813,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             (*(this->success)).clear();
-            uint32_t _size1256;
-            ::apache::thrift::protocol::TType _ktype1257;
-            ::apache::thrift::protocol::TType _vtype1258;
-            xfer += iprot->readMapBegin(_ktype1257, _vtype1258, _size1256);
-            uint32_t _i1260;
-            for (_i1260 = 0; _i1260 < _size1256; ++_i1260)
+            uint32_t _size1249;
+            ::apache::thrift::protocol::TType _ktype1250;
+            ::apache::thrift::protocol::TType _vtype1251;
+            xfer += iprot->readMapBegin(_ktype1250, _vtype1251, _size1249);
+            uint32_t _i1253;
+            for (_i1253 = 0; _i1253 < _size1249; ++_i1253)
             {
-              std::string _key1261;
-              xfer += iprot->readString(_key1261);
-              Type& _val1262 = (*(this->success))[_key1261];
-              xfer += _val1262.read(iprot);
+              std::string _key1254;
+              xfer += iprot->readString(_key1254);
+              Type& _val1255 = (*(this->success))[_key1254];
+              xfer += _val1255.read(iprot);
             }
             xfer += iprot->readMapEnd();
           }
@@ -3977,14 +3977,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1263;
-            ::apache::thrift::protocol::TType _etype1266;
-            xfer += iprot->readListBegin(_etype1266, _size1263);
-            this->success.resize(_size1263);
-            uint32_t _i1267;
-            for (_i1267 = 0; _i1267 < _size1263; ++_i1267)
+            uint32_t _size1256;
+            ::apache::thrift::protocol::TType _etype1259;
+            xfer += iprot->readListBegin(_etype1259, _size1256);
+            this->success.resize(_size1256);
+            uint32_t _i1260;
+            for (_i1260 = 0; _i1260 < _size1256; ++_i1260)
             {
-              xfer += this->success[_i1267].read(iprot);
+              xfer += this->success[_i1260].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4039,10 +4039,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1268;
-      for (_iter1268 = this->success.begin(); _iter1268 != this->success.end(); ++_iter1268)
+      std::vector<FieldSchema> ::const_iterator _iter1261;
+      for (_iter1261 = this->success.begin(); _iter1261 != this->success.end(); ++_iter1261)
       {
-        xfer += (*_iter1268).write(oprot);
+        xfer += (*_iter1261).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -4095,14 +4095,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1269;
-            ::apache::thrift::protocol::TType _etype1272;
-            xfer += iprot->readListBegin(_etype1272, _size1269);
-            (*(this->success)).resize(_size1269);
-            uint32_t _i1273;
-            for (_i1273 = 0; _i1273 < _size1269; ++_i1273)
+            uint32_t _size1262;
+            ::apache::thrift::protocol::TType _etype1265;
+            xfer += iprot->readListBegin(_etype1265, _size1262);
+            (*(this->success)).resize(_size1262);
+            uint32_t _i1266;
+            for (_i1266 = 0; _i1266 < _size1262; ++_i1266)
             {
-              xfer += (*(this->success))[_i1273].read(iprot);
+              xfer += (*(this->success))[_i1266].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4288,14 +4288,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1274;
-            ::apache::thrift::protocol::TType _etype1277;
-            xfer += iprot->readListBegin(_etype1277, _size1274);
-            this->success.resize(_size1274);
-            uint32_t _i1278;
-            for (_i1278 = 0; _i1278 < _size1274; ++_i1278)
+            uint32_t _size1267;
+            ::apache::thrift::protocol::TType _etype1270;
+            xfer += iprot->readListBegin(_etype1270, _size1267);
+            this->success.resize(_size1267);
+            uint32_t _i1271;
+            for (_i1271 = 0; _i1271 < _size1267; ++_i1271)
             {
-              xfer += this->success[_i1278].read(iprot);
+              xfer += this->success[_i1271].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4350,10 +4350,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1279;
-      for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279)
+      std::vector<FieldSchema> ::const_iterator _iter1272;
+      for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272)
       {
-        xfer += (*_iter1279).write(oprot);
+        xfer += (*_iter1272).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -4406,14 +4406,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1280;
-            ::apache::thrift::protocol::TType _etype1283;
-            xfer += iprot->readListBegin(_etype1283, _size1280);
-            (*(this->success)).resize(_size1280);
-            uint32_t _i1284;
-            for (_i1284 = 0; _i1284 < _size1280; ++_i1284)
+            uint32_t _size1273;
+            ::apache::thrift::protocol::TType _etype1276;
+            xfer += iprot->readListBegin(_etype1276, _size1273);
+            (*(this->success)).resize(_size1273);
+            uint32_t _i1277;
+            for (_i1277 = 0; _i1277 < _size1273; ++_i1277)
             {
-              xfer += (*(this->success))[_i1284].read(iprot);
+              xfer += (*(this->success))[_i1277].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4583,14 +4583,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1285;
-            ::apache::thrift::protocol::TType _etype1288;
-            xfer += iprot->readListBegin(_etype1288, _size1285);
-            this->success.resize(_size1285);
-            uint32_t _i1289;
-            for (_i1289 = 0; _i1289 < _size1285; ++_i1289)
+            uint32_t _size1278;
+            ::apache::thrift::protocol::TType _etype1281;
+            xfer += iprot->readListBegin(_etype1281, _size1278);
+            this->success.resize(_size1278);
+            uint32_t _i1282;
+            for (_i1282 = 0; _i1282 < _size1278; ++_i1282)
             {
-              xfer += this->success[_i1289].read(iprot);
+              xfer += this->success[_i1282].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4645,10 +4645,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1290;
-      for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290)
+      std::vector<FieldSchema> ::const_iterator _iter1283;
+      for (_iter1283 = this->success.begin(); _iter1283 != this->success.end(); ++_iter1283)
       {
-        xfer += (*_iter1290).write(oprot);
+        xfer += (*_iter1283).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -4701,14 +4701,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1291;
-            ::apache::thrift::protocol::TType _etype1294;
-            xfer += iprot->readListBegin(_etype1294, _size1291);
-            (*(this->success)).resize(_size1291);
-            uint32_t _i1295;
-            for (_i1295 = 0; _i1295 < _size1291; ++_i1295)
+            uint32_t _size1284;
+            ::apache::thrift::protocol::TType _etype1287;
+            xfer += iprot->readListBegin(_etype1287, _size1284);
+            (*(this->success)).resize(_size1284);
+            uint32_t _i1288;
+            for (_i1288 = 0; _i1288 < _size1284; ++_i1288)
             {
-              xfer += (*(this->success))[_i1295].read(iprot);
+              xfer += (*(this->success))[_i1288].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4894,14 +4894,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1296;
-            ::apache::thrift::protocol::TType _etype1299;
-            xfer += iprot->readListBegin(_etype1299, _size1296);
-            this->success.resize(_size1296);
-            uint32_t _i1300;
-            for (_i1300 = 0; _i1300 < _size1296; ++_i1300)
+            uint32_t _size1289;
+            ::apache::thrift::protocol::TType _etype1292;
+            xfer += iprot->readListBegin(_etype1292, _size1289);
+            this->success.resize(_size1289);
+            uint32_t _i1293;
+            for (_i1293 = 0; _i1293 < _size1289; ++_i1293)
             {
-              xfer += this->success[_i1300].read(iprot);
+              xfer += this->success[_i1293].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -4956,10 +4956,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<FieldSchema> ::const_iterator _iter1301;
-      for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301)
+      std::vector<FieldSchema> ::const_iterator _iter1294;
+      for (_iter1294 = this->success.begin(); _iter1294 != this->success.end(); ++_iter1294)
       {
-        xfer += (*_iter1301).write(oprot);
+        xfer += (*_iter1294).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -5012,14 +5012,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1302;
-            ::apache::thrift::protocol::TType _etype1305;
-            xfer += iprot->readListBegin(_etype1305, _size1302);
-            (*(this->success)).resize(_size1302);
-            uint32_t _i1306;
-            for (_i1306 = 0; _i1306 < _size1302; ++_i1306)
+            uint32_t _size1295;
+            ::apache::thrift::protocol::TType _etype1298;
+            xfer += iprot->readListBegin(_etype1298, _size1295);
+            (*(this->success)).resize(_size1295);
+            uint32_t _i1299;
+            for (_i1299 = 0; _i1299 < _size1295; ++_i1299)
             {
-              xfer += (*(this->success))[_i1306].read(iprot);
+              xfer += (*(this->success))[_i1299].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5612,14 +5612,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->primaryKeys.clear();
-            uint32_t _size1307;
-            ::apache::thrift::protocol::TType _etype1310;
-            xfer += iprot->readListBegin(_etype1310, _size1307);
-            this->primaryKeys.resize(_size1307);
-            uint32_t _i1311;
-            for (_i1311 = 0; _i1311 < _size1307; ++_i1311)
+            uint32_t _size1300;
+            ::apache::thrift::protocol::TType _etype1303;
+            xfer += iprot->readListBegin(_etype1303, _size1300);
+            this->primaryKeys.resize(_size1300);
+            uint32_t _i1304;
+            for (_i1304 = 0; _i1304 < _size1300; ++_i1304)
             {
-              xfer += this->primaryKeys[_i1311].read(iprot);
+              xfer += this->primaryKeys[_i1304].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5632,14 +5632,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->foreignKeys.clear();
-            uint32_t _size1312;
-            ::apache::thrift::protocol::TType _etype1315;
-            xfer += iprot->readListBegin(_etype1315, _size1312);
-            this->foreignKeys.resize(_size1312);
-            uint32_t _i1316;
-            for (_i1316 = 0; _i1316 < _size1312; ++_i1316)
+            uint32_t _size1305;
+            ::apache::thrift::protocol::TType _etype1308;
+            xfer += iprot->readListBegin(_etype1308, _size1305);
+            this->foreignKeys.resize(_size1305);
+            uint32_t _i1309;
+            for (_i1309 = 0; _i1309 < _size1305; ++_i1309)
             {
-              xfer += this->foreignKeys[_i1316].read(iprot);
+              xfer += this->foreignKeys[_i1309].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5652,14 +5652,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->uniqueConstraints.clear();
-            uint32_t _size1317;
-            ::apache::thrift::protocol::TType _etype1320;
-            xfer += iprot->readListBegin(_etype1320, _size1317);
-            this->uniqueConstraints.resize(_size1317);
-            uint32_t _i1321;
-            for (_i1321 = 0; _i1321 < _size1317; ++_i1321)
+            uint32_t _size1310;
+            ::apache::thrift::protocol::TType _etype1313;
+            xfer += iprot->readListBegin(_etype1313, _size1310);
+            this->uniqueConstraints.resize(_size1310);
+            uint32_t _i1314;
+            for (_i1314 = 0; _i1314 < _size1310; ++_i1314)
             {
-              xfer += this->uniqueConstraints[_i1321].read(iprot);
+              xfer += this->uniqueConstraints[_i1314].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5672,14 +5672,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->notNullConstraints.clear();
-            uint32_t _size1322;
-            ::apache::thrift::protocol::TType _etype1325;
-            xfer += iprot->readListBegin(_etype1325, _size1322);
-            this->notNullConstraints.resize(_size1322);
-            uint32_t _i1326;
-            for (_i1326 = 0; _i1326 < _size1322; ++_i1326)
+            uint32_t _size1315;
+            ::apache::thrift::protocol::TType _etype1318;
+            xfer += iprot->readListBegin(_etype1318, _size1315);
+            this->notNullConstraints.resize(_size1315);
+            uint32_t _i1319;
+            for (_i1319 = 0; _i1319 < _size1315; ++_i1319)
             {
-              xfer += this->notNullConstraints[_i1326].read(iprot);
+              xfer += this->notNullConstraints[_i1319].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5692,14 +5692,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->defaultConstraints.clear();
-            uint32_t _size1327;
-            ::apache::thrift::protocol::TType _etype1330;
-            xfer += iprot->readListBegin(_etype1330, _size1327);
-            this->defaultConstraints.resize(_size1327);
-            uint32_t _i1331;
-            for (_i1331 = 0; _i1331 < _size1327; ++_i1331)
+            uint32_t _size1320;
+            ::apache::thrift::protocol::TType _etype1323;
+            xfer += iprot->readListBegin(_etype1323, _size1320);
+            this->defaultConstraints.resize(_size1320);
+            uint32_t _i1324;
+            for (_i1324 = 0; _i1324 < _size1320; ++_i1324)
             {
-              xfer += this->defaultConstraints[_i1331].read(iprot);
+              xfer += this->defaultConstraints[_i1324].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5712,14 +5712,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->checkConstraints.clear();
-            uint32_t _size1332;
-            ::apache::thrift::protocol::TType _etype1335;
-            xfer += iprot->readListBegin(_etype1335, _size1332);
-            this->checkConstraints.resize(_size1332);
-            uint32_t _i1336;
-            for (_i1336 = 0; _i1336 < _size1332; ++_i1336)
+            uint32_t _size1325;
+            ::apache::thrift::protocol::TType _etype1328;
+            xfer += iprot->readListBegin(_etype1328, _size1325);
+            this->checkConstraints.resize(_size1325);
+            uint32_t _i1329;
+            for (_i1329 = 0; _i1329 < _size1325; ++_i1329)
             {
-              xfer += this->checkConstraints[_i1336].read(iprot);
+              xfer += this->checkConstraints[_i1329].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5752,10 +5752,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeys.size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter1337;
-    for (_iter1337 = this->primaryKeys.begin(); _iter1337 != this->primaryKeys.end(); ++_iter1337)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter1330;
+    for (_iter1330 = this->primaryKeys.begin(); _iter1330 != this->primaryKeys.end(); ++_iter1330)
     {
-      xfer += (*_iter1337).write(oprot);
+      xfer += (*_iter1330).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5764,10 +5764,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeys.size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter1338;
-    for (_iter1338 = this->foreignKeys.begin(); _iter1338 != this->foreignKeys.end(); ++_iter1338)
+    std::vector<SQLForeignKey> ::const_iterator _iter1331;
+    for (_iter1331 = this->foreignKeys.begin(); _iter1331 != this->foreignKeys.end(); ++_iter1331)
     {
-      xfer += (*_iter1338).write(oprot);
+      xfer += (*_iter1331).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5776,10 +5776,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->uniqueConstraints.size()));
-    std::vector<SQLUniqueConstraint> ::const_iterator _iter1339;
-    for (_iter1339 = this->uniqueConstraints.begin(); _iter1339 != this->uniqueConstraints.end(); ++_iter1339)
+    std::vector<SQLUniqueConstraint> ::const_iterator _iter1332;
+    for (_iter1332 = this->uniqueConstraints.begin(); _iter1332 != this->uniqueConstraints.end(); ++_iter1332)
     {
-      xfer += (*_iter1339).write(oprot);
+      xfer += (*_iter1332).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5788,10 +5788,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->notNullConstraints.size()));
-    std::vector<SQLNotNullConstraint> ::const_iterator _iter1340;
-    for (_iter1340 = this->notNullConstraints.begin(); _iter1340 != this->notNullConstraints.end(); ++_iter1340)
+    std::vector<SQLNotNullConstraint> ::const_iterator _iter1333;
+    for (_iter1333 = this->notNullConstraints.begin(); _iter1333 != this->notNullConstraints.end(); ++_iter1333)
     {
-      xfer += (*_iter1340).write(oprot);
+      xfer += (*_iter1333).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5800,10 +5800,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->defaultConstraints.size()));
-    std::vector<SQLDefaultConstraint> ::const_iterator _iter1341;
-    for (_iter1341 = this->defaultConstraints.begin(); _iter1341 != this->defaultConstraints.end(); ++_iter1341)
+    std::vector<SQLDefaultConstraint> ::const_iterator _iter1334;
+    for (_iter1334 = this->defaultConstraints.begin(); _iter1334 != this->defaultConstraints.end(); ++_iter1334)
     {
-      xfer += (*_iter1341).write(oprot);
+      xfer += (*_iter1334).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5812,10 +5812,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache:
   xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->checkConstraints.size()));
-    std::vector<SQLCheckConstraint> ::const_iterator _iter1342;
-    for (_iter1342 = this->checkConstraints.begin(); _iter1342 != this->checkConstraints.end(); ++_iter1342)
+    std::vector<SQLCheckConstraint> ::const_iterator _iter1335;
+    for (_iter1335 = this->checkConstraints.begin(); _iter1335 != this->checkConstraints.end(); ++_iter1335)
     {
-      xfer += (*_iter1342).write(oprot);
+      xfer += (*_iter1335).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5843,10 +5843,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->primaryKeys)).size()));
-    std::vector<SQLPrimaryKey> ::const_iterator _iter1343;
-    for (_iter1343 = (*(this->primaryKeys)).begin(); _iter1343 != (*(this->primaryKeys)).end(); ++_iter1343)
+    std::vector<SQLPrimaryKey> ::const_iterator _iter1336;
+    for (_iter1336 = (*(this->primaryKeys)).begin(); _iter1336 != (*(this->primaryKeys)).end(); ++_iter1336)
     {
-      xfer += (*_iter1343).write(oprot);
+      xfer += (*_iter1336).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5855,10 +5855,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->foreignKeys)).size()));
-    std::vector<SQLForeignKey> ::const_iterator _iter1344;
-    for (_iter1344 = (*(this->foreignKeys)).begin(); _iter1344 != (*(this->foreignKeys)).end(); ++_iter1344)
+    std::vector<SQLForeignKey> ::const_iterator _iter1337;
+    for (_iter1337 = (*(this->foreignKeys)).begin(); _iter1337 != (*(this->foreignKeys)).end(); ++_iter1337)
     {
-      xfer += (*_iter1344).write(oprot);
+      xfer += (*_iter1337).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5867,10 +5867,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->uniqueConstraints)).size()));
-    std::vector<SQLUniqueConstraint> ::const_iterator _iter1345;
-    for (_iter1345 = (*(this->uniqueConstraints)).begin(); _iter1345 != (*(this->uniqueConstraints)).end(); ++_iter1345)
+    std::vector<SQLUniqueConstraint> ::const_iterator _iter1338;
+    for (_iter1338 = (*(this->uniqueConstraints)).begin(); _iter1338 != (*(this->uniqueConstraints)).end(); ++_iter1338)
     {
-      xfer += (*_iter1345).write(oprot);
+      xfer += (*_iter1338).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5879,10 +5879,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->notNullConstraints)).size()));
-    std::vector<SQLNotNullConstraint> ::const_iterator _iter1346;
-    for (_iter1346 = (*(this->notNullConstraints)).begin(); _iter1346 != (*(this->notNullConstraints)).end(); ++_iter1346)
+    std::vector<SQLNotNullConstraint> ::const_iterator _iter1339;
+    for (_iter1339 = (*(this->notNullConstraints)).begin(); _iter1339 != (*(this->notNullConstraints)).end(); ++_iter1339)
     {
-      xfer += (*_iter1346).write(oprot);
+      xfer += (*_iter1339).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5891,10 +5891,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->defaultConstraints)).size()));
-    std::vector<SQLDefaultConstraint> ::const_iterator _iter1347;
-    for (_iter1347 = (*(this->defaultConstraints)).begin(); _iter1347 != (*(this->defaultConstraints)).end(); ++_iter1347)
+    std::vector<SQLDefaultConstraint> ::const_iterator _iter1340;
+    for (_iter1340 = (*(this->defaultConstraints)).begin(); _iter1340 != (*(this->defaultConstraints)).end(); ++_iter1340)
     {
-      xfer += (*_iter1347).write(oprot);
+      xfer += (*_iter1340).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -5903,10 +5903,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache
   xfer += oprot->writeFieldBegin("checkConstraints", ::apache::thrift::protocol::T_LIST, 7);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->checkConstraints)).size()));
-    std::vector<SQLCheckConstraint> ::const_iterator _iter1348;
-    for (_iter1348 = (*(this->checkConstraints)).begin(); _iter1348 != (*(this->checkConstraints)).end(); ++_iter1348)
+    std::vector<SQLCheckConstraint> ::const_iterator _iter1341;
+    for (_iter1341 = (*(this->checkConstraints)).begin(); _iter1341 != (*(this->checkConstraints)).end(); ++_iter1341)
     {
-      xfer += (*_iter1348).write(oprot);
+      xfer += (*_iter1341).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8074,14 +8074,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->partNames.clear();
-            uint32_t _size1349;
-            ::apache::thrift::protocol::TType _etype1352;
-            xfer += iprot->readListBegin(_etype1352, _size1349);
-            this->partNames.resize(_size1349);
-            uint32_t _i1353;
-            for (_i1353 = 0; _i1353 < _size1349; ++_i1353)
+            uint32_t _size1342;
+            ::apache::thrift::protocol::TType _etype1345;
+            xfer += iprot->readListBegin(_etype1345, _size1342);
+            this->partNames.resize(_size1342);
+            uint32_t _i1346;
+            for (_i1346 = 0; _i1346 < _size1342; ++_i1346)
             {
-              xfer += iprot->readString(this->partNames[_i1353]);
+              xfer += iprot->readString(this->partNames[_i1346]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8118,10 +8118,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size()));
-    std::vector<std::string> ::const_iterator _iter1354;
-    for (_iter1354 = this->partNames.begin(); _iter1354 != this->partNames.end(); ++_iter1354)
+    std::vector<std::string> ::const_iterator _iter1347;
+    for (_iter1347 = this->partNames.begin(); _iter1347 != this->partNames.end(); ++_iter1347)
     {
-      xfer += oprot->writeString((*_iter1354));
+      xfer += oprot->writeString((*_iter1347));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8153,10 +8153,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partNames)).size()));
-    std::vector<std::string> ::const_iterator _iter1355;
-    for (_iter1355 = (*(this->partNames)).begin(); _iter1355 != (*(this->partNames)).end(); ++_iter1355)
+    std::vector<std::string> ::const_iterator _iter1348;
+    for (_iter1348 = (*(this->partNames)).begin(); _iter1348 != (*(this->partNames)).end(); ++_iter1348)
     {
-      xfer += oprot->writeString((*_iter1355));
+      xfer += oprot->writeString((*_iter1348));
     }
     xfer += oprot->writeListEnd();
   }
@@ -8400,14 +8400,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1356;
-            ::apache::thrift::protocol::TType _etype1359;
-            xfer += iprot->readListBegin(_etype1359, _size1356);
-            this->success.resize(_size1356);
-            uint32_t _i1360;
-            for (_i1360 = 0; _i1360 < _size1356; ++_i1360)
+            uint32_t _size1349;
+            ::apache::thrift::protocol::TType _etype1352;
+            xfer += iprot->readListBegin(_etype1352, _size1349);
+            this->success.resize(_size1349);
+            uint32_t _i1353;
+            for (_i1353 = 0; _i1353 < _size1349; ++_i1353)
             {
-              xfer += iprot->readString(this->success[_i1360]);
+              xfer += iprot->readString(this->success[_i1353]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8446,10 +8446,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1361;
-      for (_iter1361 = this->success.begin(); _iter1361 != this->success.end(); ++_iter1361)
+      std::vector<std::string> ::const_iterator _iter1354;
+      for (_iter1354 = this->success.begin(); _iter1354 != this->success.end(); ++_iter1354)
       {
-        xfer += oprot->writeString((*_iter1361));
+        xfer += oprot->writeString((*_iter1354));
       }
       xfer += oprot->writeListEnd();
     }
@@ -8494,14 +8494,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1362;
-            ::apache::thrift::protocol::TType _etype1365;
-            xfer += iprot->readListBegin(_etype1365, _size1362);
-            (*(this->success)).resize(_size1362);
-            uint32_t _i1366;
-            for (_i1366 = 0; _i1366 < _size1362; ++_i1366)
+            uint32_t _size1355;
+            ::apache::thrift::protocol::TType _etype1358;
+            xfer += iprot->readListBegin(_etype1358, _size1355);
+            (*(this->success)).resize(_size1355);
+            uint32_t _i1359;
+            for (_i1359 = 0; _i1359 < _size1355; ++_i1359)
             {
-              xfer += iprot->readString((*(this->success))[_i1366]);
+              xfer += iprot->readString((*(this->success))[_i1359]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8671,14 +8671,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1367;
-            ::apache::thrift::protocol::TType _etype1370;
-            xfer += iprot->readListBegin(_etype1370, _size1367);
-            this->success.resize(_size1367);
-            uint32_t _i1371;
-            for (_i1371 = 0; _i1371 < _size1367; ++_i1371)
+            uint32_t _size1360;
+            ::apache::thrift::protocol::TType _etype1363;
+            xfer += iprot->readListBegin(_etype1363, _size1360);
+            this->success.resize(_size1360);
+            uint32_t _i1364;
+            for (_i1364 = 0; _i1364 < _size1360; ++_i1364)
             {
-              xfer += iprot->readString(this->success[_i1371]);
+              xfer += iprot->readString(this->success[_i1364]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8717,10 +8717,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift::
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1372;
-      for (_iter1372 = this->success.begin(); _iter1372 != this->success.end(); ++_iter1372)
+      std::vector<std::string> ::const_iterator _iter1365;
+      for (_iter1365 = this->success.begin(); _iter1365 != this->success.end(); ++_iter1365)
       {
-        xfer += oprot->writeString((*_iter1372));
+        xfer += oprot->writeString((*_iter1365));
       }
       xfer += oprot->writeListEnd();
     }
@@ -8765,14 +8765,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1373;
-            ::apache::thrift::protocol::TType _etype1376;
-            xfer += iprot->readListBegin(_etype1376, _size1373);
-            (*(this->success)).resize(_size1373);
-            uint32_t _i1377;
-            for (_i1377 = 0; _i1377 < _size1373; ++_i1377)
+            uint32_t _size1366;
+            ::apache::thrift::protocol::TType _etype1369;
+            xfer += iprot->readListBegin(_etype1369, _size1366);
+            (*(this->success)).resize(_size1366);
+            uint32_t _i1370;
+            for (_i1370 = 0; _i1370 < _size1366; ++_i1370)
             {
-              xfer += iprot->readString((*(this->success))[_i1377]);
+              xfer += iprot->readString((*(this->success))[_i1370]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8910,14 +8910,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1378;
-            ::apache::thrift::protocol::TType _etype1381;
-            xfer += iprot->readListBegin(_etype1381, _size1378);
-            this->success.resize(_size1378);
-            uint32_t _i1382;
-            for (_i1382 = 0; _i1382 < _size1378; ++_i1382)
+            uint32_t _size1371;
+            ::apache::thrift::protocol::TType _etype1374;
+            xfer += iprot->readListBegin(_etype1374, _size1371);
+            this->success.resize(_size1371);
+            uint32_t _i1375;
+            for (_i1375 = 0; _i1375 < _size1371; ++_i1375)
             {
-              xfer += iprot->readString(this->success[_i1382]);
+              xfer += iprot->readString(this->success[_i1375]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8956,10 +8956,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write(
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1383;
-      for (_iter1383 = this->success.begin(); _iter1383 != this->success.end(); ++_iter1383)
+      std::vector<std::string> ::const_iterator _iter1376;
+      for (_iter1376 = this->success.begin(); _iter1376 != this->success.end(); ++_iter1376)
       {
-        xfer += oprot->writeString((*_iter1383));
+        xfer += oprot->writeString((*_iter1376));
       }
       xfer += oprot->writeListEnd();
     }
@@ -9004,14 +9004,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1384;
-            ::apache::thrift::protocol::TType _etype1387;
-            xfer += iprot->readListBegin(_etype1387, _size1384);
-            (*(this->success)).resize(_size1384);
-            uint32_t _i1388;
-            for (_i1388 = 0; _i1388 < _size1384; ++_i1388)
+            uint32_t _size1377;
+            ::apache::thrift::protocol::TType _etype1380;
+            xfer += iprot->readListBegin(_etype1380, _size1377);
+            (*(this->success)).resize(_size1377);
+            uint32_t _i1381;
+            for (_i1381 = 0; _i1381 < _size1377; ++_i1381)
             {
-              xfer += iprot->readString((*(this->success))[_i1388]);
+              xfer += iprot->readString((*(this->success))[_i1381]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9086,14 +9086,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_types.clear();
-            uint32_t _size1389;
-            ::apache::thrift::protocol::TType _etype1392;
-            xfer += iprot->readListBegin(_etype1392, _size1389);
-            this->tbl_types.resize(_size1389);
-            uint32_t _i1393;
-            for (_i1393 = 0; _i1393 < _size1389; ++_i1393)
+            uint32_t _size1382;
+            ::apache::thrift::protocol::TType _etype1385;
+            xfer += iprot->readListBegin(_etype1385, _size1382);
+            this->tbl_types.resize(_size1382);
+            uint32_t _i1386;
+            for (_i1386 = 0; _i1386 < _size1382; ++_i1386)
             {
-              xfer += iprot->readString(this->tbl_types[_i1393]);
+              xfer += iprot->readString(this->tbl_types[_i1386]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9130,10 +9130,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_types.size()));
-    std::vector<std::string> ::const_iterator _iter1394;
-    for (_iter1394 = this->tbl_types.begin(); _iter1394 != this->tbl_types.end(); ++_iter1394)
+    std::vector<std::string> ::const_iterator _iter1387;
+    for (_iter1387 = this->tbl_types.begin(); _iter1387 != this->tbl_types.end(); ++_iter1387)
     {
-      xfer += oprot->writeString((*_iter1394));
+      xfer += oprot->writeString((*_iter1387));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9165,10 +9165,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_types)).size()));
-    std::vector<std::string> ::const_iterator _iter1395;
-    for (_iter1395 = (*(this->tbl_types)).begin(); _iter1395 != (*(this->tbl_types)).end(); ++_iter1395)
+    std::vector<std::string> ::const_iterator _iter1388;
+    for (_iter1388 = (*(this->tbl_types)).begin(); _iter1388 != (*(this->tbl_types)).end(); ++_iter1388)
     {
-      xfer += oprot->writeString((*_iter1395));
+      xfer += oprot->writeString((*_iter1388));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9209,14 +9209,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1396;
-            ::apache::thrift::protocol::TType _etype1399;
-            xfer += iprot->readListBegin(_etype1399, _size1396);
-            this->success.resize(_size1396);
-            uint32_t _i1400;
-            for (_i1400 = 0; _i1400 < _size1396; ++_i1400)
+            uint32_t _size1389;
+            ::apache::thrift::protocol::TType _etype1392;
+            xfer += iprot->readListBegin(_etype1392, _size1389);
+            this->success.resize(_size1389);
+            uint32_t _i1393;
+            for (_i1393 = 0; _i1393 < _size1389; ++_i1393)
             {
-              xfer += this->success[_i1400].read(iprot);
+              xfer += this->success[_i1393].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9255,10 +9255,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<TableMeta> ::const_iterator _iter1401;
-      for (_iter1401 = this->success.begin(); _iter1401 != this->success.end(); ++_iter1401)
+      std::vector<TableMeta> ::const_iterator _iter1394;
+      for (_iter1394 = this->success.begin(); _iter1394 != this->success.end(); ++_iter1394)
       {
-        xfer += (*_iter1401).write(oprot);
+        xfer += (*_iter1394).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -9303,14 +9303,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1402;
-            ::apache::thrift::protocol::TType _etype1405;
-            xfer += iprot->readListBegin(_etype1405, _size1402);
-            (*(this->success)).resize(_size1402);
-            uint32_t _i1406;
-            for (_i1406 = 0; _i1406 < _size1402; ++_i1406)
+            uint32_t _size1395;
+            ::apache::thrift::protocol::TType _etype1398;
+            xfer += iprot->readListBegin(_etype1398, _size1395);
+            (*(this->success)).resize(_size1395);
+            uint32_t _i1399;
+            for (_i1399 = 0; _i1399 < _size1395; ++_i1399)
             {
-              xfer += (*(this->success))[_i1406].read(iprot);
+              xfer += (*(this->success))[_i1399].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9448,14 +9448,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1407;
-            ::apache::thrift::protocol::TType _etype1410;
-            xfer += iprot->readListBegin(_etype1410, _size1407);
-            this->success.resize(_size1407);
-            uint32_t _i1411;
-            for (_i1411 = 0; _i1411 < _size1407; ++_i1411)
+            uint32_t _size1400;
+            ::apache::thrift::protocol::TType _etype1403;
+            xfer += iprot->readListBegin(_etype1403, _size1400);
+            this->success.resize(_size1400);
+            uint32_t _i1404;
+            for (_i1404 = 0; _i1404 < _size1400; ++_i1404)
             {
-              xfer += iprot->readString(this->success[_i1411]);
+              xfer += iprot->readString(this->success[_i1404]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9494,10 +9494,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1412;
-      for (_iter1412 = this->success.begin(); _iter1412 != this->success.end(); ++_iter1412)
+      std::vector<std::string> ::const_iterator _iter1405;
+      for (_iter1405 = this->success.begin(); _iter1405 != this->success.end(); ++_iter1405)
       {
-        xfer += oprot->writeString((*_iter1412));
+        xfer += oprot->writeString((*_iter1405));
       }
       xfer += oprot->writeListEnd();
     }
@@ -9542,14 +9542,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1413;
-            ::apache::thrift::protocol::TType _etype1416;
-            xfer += iprot->readListBegin(_etype1416, _size1413);
-            (*(this->success)).resize(_size1413);
-            uint32_t _i1417;
-            for (_i1417 = 0; _i1417 < _size1413; ++_i1417)
+            uint32_t _size1406;
+            ::apache::thrift::protocol::TType _etype1409;
+            xfer += iprot->readListBegin(_etype1409, _size1406);
+            (*(this->success)).resize(_size1406);
+            uint32_t _i1410;
+            for (_i1410 = 0; _i1410 < _size1406; ++_i1410)
             {
-              xfer += iprot->readString((*(this->success))[_i1417]);
+              xfer += iprot->readString((*(this->success))[_i1410]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9859,14 +9859,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->tbl_names.clear();
-            uint32_t _size1418;
-            ::apache::thrift::protocol::TType _etype1421;
-            xfer += iprot->readListBegin(_etype1421, _size1418);
-            this->tbl_names.resize(_size1418);
-            uint32_t _i1422;
-            for (_i1422 = 0; _i1422 < _size1418; ++_i1422)
+            uint32_t _size1411;
+            ::apache::thrift::protocol::TType _etype1414;
+            xfer += iprot->readListBegin(_etype1414, _size1411);
+            this->tbl_names.resize(_size1411);
+            uint32_t _i1415;
+            for (_i1415 = 0; _i1415 < _size1411; ++_i1415)
             {
-              xfer += iprot->readString(this->tbl_names[_i1422]);
+              xfer += iprot->readString(this->tbl_names[_i1415]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9899,10 +9899,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter1423;
-    for (_iter1423 = this->tbl_names.begin(); _iter1423 != this->tbl_names.end(); ++_iter1423)
+    std::vector<std::string> ::const_iterator _iter1416;
+    for (_iter1416 = this->tbl_names.begin(); _iter1416 != this->tbl_names.end(); ++_iter1416)
     {
-      xfer += oprot->writeString((*_iter1423));
+      xfer += oprot->writeString((*_iter1416));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9930,10 +9930,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th
   xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1424;
-    for (_iter1424 = (*(this->tbl_names)).begin(); _iter1424 != (*(this->tbl_names)).end(); ++_iter1424)
+    std::vector<std::string> ::const_iterator _iter1417;
+    for (_iter1417 = (*(this->tbl_names)).begin(); _iter1417 != (*(this->tbl_names)).end(); ++_iter1417)
     {
-      xfer += oprot->writeString((*_iter1424));
+      xfer += oprot->writeString((*_iter1417));
     }
     xfer += oprot->writeListEnd();
   }
@@ -9974,14 +9974,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1425;
-            ::apache::thrift::protocol::TType _etype1428;
-            xfer += iprot->readListBegin(_etype1428, _size1425);
-            this->success.resize(_size1425);
-            uint32_t _i1429;
-            for (_i1429 = 0; _i1429 < _size1425; ++_i1429)
+            uint32_t _size1418;
+            ::apache::thrift::protocol::TType _etype1421;
+            xfer += iprot->readListBegin(_etype1421, _size1418);
+            this->success.resize(_size1418);
+            uint32_t _i1422;
+            for (_i1422 = 0; _i1422 < _size1418; ++_i1422)
             {
-              xfer += this->success[_i1429].read(iprot);
+              xfer += this->success[_i1422].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10012,10 +10012,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Table> ::const_iterator _iter1430;
-      for (_iter1430 = this->success.begin(); _iter1430 != this->success.end(); ++_iter1430)
+      std::vector<Table> ::const_iterator _iter1423;
+      for (_iter1423 = this->success.begin(); _iter1423 != this->success.end(); ++_iter1423)
       {
-        xfer += (*_iter1430).write(oprot);
+        xfer += (*_iter1423).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -10056,14 +10056,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1431;
-            ::apache::thrift::protocol::TType _etype1434;
-            xfer += iprot->readListBegin(_etype1434, _size1431);
-            (*(this->success)).resize(_size1431);
-            uint32_t _i1435;
-            for (_i1435 = 0; _i1435 < _size1431; ++_i1435)
+            uint32_t _size1424;
+            ::apache::thrift::protocol::TType _etype1427;
+            xfer += iprot->readListBegin(_etype1427, _size1424);
+            (*(this->success)).resize(_size1424);
+            uint32_t _i1428;
+            for (_i1428 = 0; _i1428 < _size1424; ++_i1428)
             {
-              xfer += (*(this->success))[_i1435].read(iprot);
+              xfer += (*(this->success))[_i1428].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -10585,29 +10585,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(::
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->dbname);
-          this->__isset.dbname = true;
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->creation_metadata.read(iprot);
+          this->__isset.creation_metadata = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
       case 2:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->tbl_names.clear();
-            uint32_t _size1436;
-            ::apache::thrift::protocol::TType _etype1439;
-            xfer += iprot->readListBegin(_etype1439, _size1436);
-            this->tbl_names.resize(_size1436);
-            uint32_t _i1440;
-            for (_i1440 = 0; _i1440 < _size1436; ++_i1440)
-            {
-              xfer += iprot->readString(this->tbl_names[_i1440]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.tbl_names = true;
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->validTxnList);
+          this->__isset.validTxnList = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -10629,20 +10617,12 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(:
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_materialization_invalidation_info_args");
 
-  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->dbname);
+  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->creation_metadata.write(oprot);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size()));
-    std::vector<std::string> ::const_iterator _iter1441;
-    for (_iter1441 = this->tbl_names.begin(); _iter1441 != this->tbl_names.end(); ++_iter1441)
-    {
-      xfer += oprot->writeString((*_iter1441));
-    }
-    xfer += oprot->writeListEnd();
-  }
+  xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->validTxnList);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -10660,20 +10640,12 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write(
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_materialization_invalidation_info_pargs");
 
-  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString((*(this->dbname)));
+  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->creation_metadata)).write(oprot);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1442;
-    for (_iter1442 = (*(this->tbl_names)).begin(); _iter1442 != (*(this->tbl_names)).end(); ++_iter1442)
-    {
-      xfer += oprot->writeString((*_iter1442));
-    }
-    xfer += oprot->writeListEnd();
-  }
+  xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->validTxnList)));
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
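
For readers skimming the regenerated C++ marshalling above: the three hunks for ThriftHiveMetastore_get_materialization_invalidation_info_args/_pargs replace the old pair (dbname: string, tbl_names: list<string>) with a CreationMetadata struct in field 1 and a validTxnList string in field 2. Below is a minimal caller-side sketch against the regenerated client stub; it assumes standard Thrift C++ codegen conventions, and every member or setter name other than creation_metadata and validTxnList is an assumption, not something visible in this diff.

  // Hedged sketch only -- assumes the usual Thrift-generated C++ client
  // (ThriftHiveMetastoreClient) and a CreationMetadata struct whose exact
  // members are not shown in these hunks.
  #include "ThriftHiveMetastore.h"
  #include "hive_metastore_types.h"

  void example(ThriftHiveMetastoreClient& client) {
    CreationMetadata cm;                  // describes one materialized view
    cm.__set_dbName("default");           // assumed field/setter name
    cm.__set_tblName("mv1");              // assumed field/setter name

    std::string validTxnList = "...";     // caller's serialized ValidTxnList

    Materialization info;                 // single struct result (see the result hunks below)
    client.get_materialization_invalidation_info(info, cm, validTxnList);
  }
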
@@ -10708,23 +10680,8 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read(
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_MAP) {
-          {
-            this->success.clear();
-            uint32_t _size1443;
-            ::apache::thrift::protocol::TType _ktype1444;
-            ::apache::thrift::protocol::TType _vtype1445;
-            xfer += iprot->readMapBegin(_ktype1444, _vtype1445, _size1443);
-            uint32_t _i1447;
-            for (_i1447 = 0; _i1447 < _size1443; ++_i1447)
-            {
-              std::string _key1448;
-              xfer += iprot->readString(_key1448);
-              Materialization& _val1449 = this->success[_key1448];
-              xfer += _val1449.read(iprot);
-            }
-            xfer += iprot->readMapEnd();
-          }
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -10773,17 +10730,8 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write
   xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_materialization_invalidation_info_result");
 
   if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0);
-    {
-      xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::map<std::string, Materialization> ::const_iterator _iter1450;
-      for (_iter1450 = this->success.begin(); _iter1450 != this->success.end(); ++_iter1450)
-      {
-        xfer += oprot->writeString(_iter1450->first);
-        xfer += _iter1450->second.write(oprot);
-      }
-      xfer += oprot->writeMapEnd();
-    }
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
     xfer += oprot->writeFieldEnd();
   } else if (this->__isset.o1) {
     xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
@@ -10830,23 +10778,8 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read
     switch (fid)
     {
       case 0:
-        if (ftype == ::apache::thrift::protocol::T_MAP) {
-          {
-            (*(this->success)).clear();
-            uint32_t _size1451;
-            ::apache::thrift::protocol::TType _ktype1452;
-            ::apache::thrift::protocol::TType _vtype1453;
-            xfer += iprot->readMapBegin(_ktype1452, _vtype1453, _size1451);
-            uint32_t _i1455;
-            for (_i1455 = 0; _i1455 < _size1451; ++_i1455)
-            {
-              std::string _key1456;
-              xfer += iprot->readString(_key1456);
-              Materialization& _val1457 = (*(this->success))[_key1456];
-              xfer += _val1457.read(iprot);
-            }
-            xfer += iprot->readMapEnd();
-          }
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
           this->__isset.success = true;
         } else {
           xfer += iprot->skip(ftype);
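
Taken together with the args hunks above, the result hunks switch the generated return value from a per-table map to a single struct. Under standard Thrift C++ codegen the ThriftHiveMetastoreIf method presumably changes roughly as sketched below; this is a hedged reconstruction, since only the field names and wire types appear in this diff.

  // Before: map keyed by table name, plain dbname plus a table-name list.
  // virtual void get_materialization_invalidation_info(
  //     std::map<std::string, Materialization>& _return,
  //     const std::string& dbname,
  //     const std::vector<std::string>& tbl_names) = 0;
  //
  // After: one Materialization for the view described by creation_metadata,
  // evaluated against the caller-supplied validTxnList.
  // virtual void get_materialization_invalidation_info(
  //     Materialization& _return,
  //     const CreationMetadata& creation_metadata,
  //     const std::string& validTxnList) = 0;
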
@@ -11304,14 +11237,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1458;
-            ::apache::thrift::protocol::TType _etype1461;
-            xfer += iprot->readListBegin(_etype1461, _size1458);
-            this->success.resize(_size1458);
-            uint32_t _i1462;
-            for (_i1462 = 0; _i1462 < _size1458; ++_i1462)
+            uint32_t _size1429;
+            ::apache::thrift::protocol::TType _etype1432;
+            xfer += iprot->readListBegin(_etype1432, _size1429);
+            this->success.resize(_size1429);
+            uint32_t _i1433;
+            for (_i1433 = 0; _i1433 < _size1429; ++_i1433)
             {
-              xfer += iprot->readString(this->success[_i1462]);
+              xfer += iprot->readString(this->success[_i1433]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11366,10 +11299,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1463;
-      for (_iter1463 = this->success.begin(); _iter1463 != this->success.end(); ++_iter1463)
+      std::vector<std::string> ::const_iterator _iter1434;
+      for (_iter1434 = this->success.begin(); _iter1434 != this->success.end(); ++_iter1434)
       {
-        xfer += oprot->writeString((*_iter1463));
+        xfer += oprot->writeString((*_iter1434));
       }
       xfer += oprot->writeListEnd();
     }
@@ -11422,14 +11355,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1464;
-            ::apache::thrift::protocol::TType _etype1467;
-            xfer += iprot->readListBegin(_etype1467, _size1464);
-            (*(this->success)).resize(_size1464);
-            uint32_t _i1468;
-            for (_i1468 = 0; _i1468 < _size1464; ++_i1468)
+            uint32_t _size1435;
+            ::apache::thrift::protocol::TType _etype1438;
+            xfer += iprot->readListBegin(_etype1438, _size1435);
+            (*(this->success)).resize(_size1435);
+            uint32_t _i1439;
+            for (_i1439 = 0; _i1439 < _size1435; ++_i1439)
             {
-              xfer += iprot->readString((*(this->success))[_i1468]);
+              xfer += iprot->readString((*(this->success))[_i1439]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12763,14 +12696,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1469;
-            ::apache::thrift::protocol::TType _etype1472;
-            xfer += iprot->readListBegin(_etype1472, _size1469);
-            this->new_parts.resize(_size1469);
-            uint32_t _i1473;
-            for (_i1473 = 0; _i1473 < _size1469; ++_i1473)
+            uint32_t _size1440;
+            ::apache::thrift::protocol::TType _etype1443;
+            xfer += iprot->readListBegin(_etype1443, _size1440);
+            this->new_parts.resize(_size1440);
+            uint32_t _i1444;
+            for (_i1444 = 0; _i1444 < _size1440; ++_i1444)
             {
-              xfer += this->new_parts[_i1473].read(iprot);
+              xfer += this->new_parts[_i1444].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -12799,10 +12732,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter1474;
-    for (_iter1474 = this->new_parts.begin(); _iter1474 != this->new_parts.end(); ++_iter1474)
+    std::vector<Partition> ::const_iterator _iter1445;
+    for (_iter1445 = this->new_parts.begin(); _iter1445 != this->new_parts.end(); ++_iter1445)
     {
-      xfer += (*_iter1474).write(oprot);
+      xfer += (*_iter1445).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -12826,10 +12759,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter1475;
-    for (_iter1475 = (*(this->new_parts)).begin(); _iter1475 != (*(this->new_parts)).end(); ++_iter1475)
+    std::vector<Partition> ::const_iterator _iter1446;
+    for (_iter1446 = (*(this->new_parts)).begin(); _iter1446 != (*(this->new_parts)).end(); ++_iter1446)
     {
-      xfer += (*_iter1475).write(oprot);
+      xfer += (*_iter1446).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13038,14 +12971,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size1476;
-            ::apache::thrift::protocol::TType _etype1479;
-            xfer += iprot->readListBegin(_etype1479, _size1476);
-            this->new_parts.resize(_size1476);
-            uint32_t _i1480;
-            for (_i1480 = 0; _i1480 < _size1476; ++_i1480)
+            uint32_t _size1447;
+            ::apache::thrift::protocol::TType _etype1450;
+            xfer += iprot->readListBegin(_etype1450, _size1447);
+            this->new_parts.resize(_size1447);
+            uint32_t _i1451;
+            for (_i1451 = 0; _i1451 < _size1447; ++_i1451)
             {
-              xfer += this->new_parts[_i1480].read(iprot);
+              xfer += this->new_parts[_i1451].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13074,10 +13007,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1481;
-    for (_iter1481 = this->new_parts.begin(); _iter1481 != this->new_parts.end(); ++_iter1481)
+    std::vector<PartitionSpec> ::const_iterator _iter1452;
+    for (_iter1452 = this->new_parts.begin(); _iter1452 != this->new_parts.end(); ++_iter1452)
     {
-      xfer += (*_iter1481).write(oprot);
+      xfer += (*_iter1452).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13101,10 +13034,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter1482;
-    for (_iter1482 = (*(this->new_parts)).begin(); _iter1482 != (*(this->new_parts)).end(); ++_iter1482)
+    std::vector<PartitionSpec> ::const_iterator _iter1453;
+    for (_iter1453 = (*(this->new_parts)).begin(); _iter1453 != (*(this->new_parts)).end(); ++_iter1453)
     {
-      xfer += (*_iter1482).write(oprot);
+      xfer += (*_iter1453).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -13329,14 +13262,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1483;
-            ::apache::thrift::protocol::TType _etype1486;
-            xfer += iprot->readListBegin(_etype1486, _size1483);
-            this->part_vals.resize(_size1483);
-            uint32_t _i1487;
-            for (_i1487 = 0; _i1487 < _size1483; ++_i1487)
+            uint32_t _size1454;
+            ::apache::thrift::protocol::TType _etype1457;
+            xfer += iprot->readListBegin(_etype1457, _size1454);
+            this->part_vals.resize(_size1454);
+            uint32_t _i1458;
+            for (_i1458 = 0; _i1458 < _size1454; ++_i1458)
             {
-              xfer += iprot->readString(this->part_vals[_i1487]);
+              xfer += iprot->readString(this->part_vals[_i1458]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13373,10 +13306,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1488;
-    for (_iter1488 = this->part_vals.begin(); _iter1488 != this->part_vals.end(); ++_iter1488)
+    std::vector<std::string> ::const_iterator _iter1459;
+    for (_iter1459 = this->part_vals.begin(); _iter1459 != this->part_vals.end(); ++_iter1459)
     {
-      xfer += oprot->writeString((*_iter1488));
+      xfer += oprot->writeString((*_iter1459));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13408,10 +13341,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1489;
-    for (_iter1489 = (*(this->part_vals)).begin(); _iter1489 != (*(this->part_vals)).end(); ++_iter1489)
+    std::vector<std::string> ::const_iterator _iter1460;
+    for (_iter1460 = (*(this->part_vals)).begin(); _iter1460 != (*(this->part_vals)).end(); ++_iter1460)
     {
-      xfer += oprot->writeString((*_iter1489));
+      xfer += oprot->writeString((*_iter1460));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13883,14 +13816,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1490;
-            ::apache::thrift::protocol::TType _etype1493;
-            xfer += iprot->readListBegin(_etype1493, _size1490);
-            this->part_vals.resize(_size1490);
-            uint32_t _i1494;
-            for (_i1494 = 0; _i1494 < _size1490; ++_i1494)
+            uint32_t _size1461;
+            ::apache::thrift::protocol::TType _etype1464;
+            xfer += iprot->readListBegin(_etype1464, _size1461);
+            this->part_vals.resize(_size1461);
+            uint32_t _i1465;
+            for (_i1465 = 0; _i1465 < _size1461; ++_i1465)
             {
-              xfer += iprot->readString(this->part_vals[_i1494]);
+              xfer += iprot->readString(this->part_vals[_i1465]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13935,10 +13868,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1495;
-    for (_iter1495 = this->part_vals.begin(); _iter1495 != this->part_vals.end(); ++_iter1495)
+    std::vector<std::string> ::const_iterator _iter1466;
+    for (_iter1466 = this->part_vals.begin(); _iter1466 != this->part_vals.end(); ++_iter1466)
     {
-      xfer += oprot->writeString((*_iter1495));
+      xfer += oprot->writeString((*_iter1466));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13974,10 +13907,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1496;
-    for (_iter1496 = (*(this->part_vals)).begin(); _iter1496 != (*(this->part_vals)).end(); ++_iter1496)
+    std::vector<std::string> ::const_iterator _iter1467;
+    for (_iter1467 = (*(this->part_vals)).begin(); _iter1467 != (*(this->part_vals)).end(); ++_iter1467)
     {
-      xfer += oprot->writeString((*_iter1496));
+      xfer += oprot->writeString((*_iter1467));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14780,14 +14713,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1497;
-            ::apache::thrift::protocol::TType _etype1500;
-            xfer += iprot->readListBegin(_etype1500, _size1497);
-            this->part_vals.resize(_size1497);
-            uint32_t _i1501;
-            for (_i1501 = 0; _i1501 < _size1497; ++_i1501)
+            uint32_t _size1468;
+            ::apache::thrift::protocol::TType _etype1471;
+            xfer += iprot->readListBegin(_etype1471, _size1468);
+            this->part_vals.resize(_size1468);
+            uint32_t _i1472;
+            for (_i1472 = 0; _i1472 < _size1468; ++_i1472)
             {
-              xfer += iprot->readString(this->part_vals[_i1501]);
+              xfer += iprot->readString(this->part_vals[_i1472]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14832,10 +14765,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1502;
-    for (_iter1502 = this->part_vals.begin(); _iter1502 != this->part_vals.end(); ++_iter1502)
+    std::vector<std::string> ::const_iterator _iter1473;
+    for (_iter1473 = this->part_vals.begin(); _iter1473 != this->part_vals.end(); ++_iter1473)
     {
-      xfer += oprot->writeString((*_iter1502));
+      xfer += oprot->writeString((*_iter1473));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14871,10 +14804,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1503;
-    for (_iter1503 = (*(this->part_vals)).begin(); _iter1503 != (*(this->part_vals)).end(); ++_iter1503)
+    std::vector<std::string> ::const_iterator _iter1474;
+    for (_iter1474 = (*(this->part_vals)).begin(); _iter1474 != (*(this->part_vals)).end(); ++_iter1474)
     {
-      xfer += oprot->writeString((*_iter1503));
+      xfer += oprot->writeString((*_iter1474));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15083,14 +15016,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1504;
-            ::apache::thrift::protocol::TType _etype1507;
-            xfer += iprot->readListBegin(_etype1507, _size1504);
-            this->part_vals.resize(_size1504);
-            uint32_t _i1508;
-            for (_i1508 = 0; _i1508 < _size1504; ++_i1508)
+            uint32_t _size1475;
+            ::apache::thrift::protocol::TType _etype1478;
+            xfer += iprot->readListBegin(_etype1478, _size1475);
+            this->part_vals.resize(_size1475);
+            uint32_t _i1479;
+            for (_i1479 = 0; _i1479 < _size1475; ++_i1479)
             {
-              xfer += iprot->readString(this->part_vals[_i1508]);
+              xfer += iprot->readString(this->part_vals[_i1479]);
             }
             xfer += iprot->readListEnd();
           }
@@ -15143,10 +15076,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1509;
-    for (_iter1509 = this->part_vals.begin(); _iter1509 != this->part_vals.end(); ++_iter1509)
+    std::vector<std::string> ::const_iterator _iter1480;
+    for (_iter1480 = this->part_vals.begin(); _iter1480 != this->part_vals.end(); ++_iter1480)
     {
-      xfer += oprot->writeString((*_iter1509));
+      xfer += oprot->writeString((*_iter1480));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15186,10 +15119,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1510;
-    for (_iter1510 = (*(this->part_vals)).begin(); _iter1510 != (*(this->part_vals)).end(); ++_iter1510)
+    std::vector<std::string> ::const_iterator _iter1481;
+    for (_iter1481 = (*(this->part_vals)).begin(); _iter1481 != (*(this->part_vals)).end(); ++_iter1481)
     {
-      xfer += oprot->writeString((*_iter1510));
+      xfer += oprot->writeString((*_iter1481));
     }
     xfer += oprot->writeListEnd();
   }
@@ -16195,14 +16128,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1511;
-            ::apache::thrift::protocol::TType _etype1514;
-            xfer += iprot->readListBegin(_etype1514, _size1511);
-            this->part_vals.resize(_size1511);
-            uint32_t _i1515;
-            for (_i1515 = 0; _i1515 < _size1511; ++_i1515)
+            uint32_t _size1482;
+            ::apache::thrift::protocol::TType _etype1485;
+            xfer += iprot->readListBegin(_etype1485, _size1482);
+            this->part_vals.resize(_size1482);
+            uint32_t _i1486;
+            for (_i1486 = 0; _i1486 < _size1482; ++_i1486)
             {
-              xfer += iprot->readString(this->part_vals[_i1515]);
+              xfer += iprot->readString(this->part_vals[_i1486]);
             }
             xfer += iprot->readListEnd();
           }
@@ -16239,10 +16172,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1516;
-    for (_iter1516 = this->part_vals.begin(); _iter1516 != this->part_vals.end(); ++_iter1516)
+    std::vector<std::string> ::const_it

<TRUNCATED>

[33/48] hive git commit: HIVE-19940: Push predicates with deterministic UDFs with RBO (Janaki Lahorani, reviewed by Vineet Garg, Naveen Gangam)

Posted by se...@apache.org.
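The q.out diff below exercises predicate pushdown through views whose columns wrap partition columns in deterministic UDFs (cast). A minimal sketch of the pattern being tested, using hypothetical table and view names (not the actual test objects), would be:

  create table t (col1 string) partitioned by (part1 string) stored as avro;
  create view v partitioned on (vpart1) as
    select cast(col1 as decimal(38,18)) as vcol1,
           cast(part1 as char(2)) as vpart1
    from t where part1 in ('US', 'CA');
  explain select vcol1 from v where vpart1 = 'US';

The intent of HIVE-19940 is that predicates referencing only deterministic UDFs over view columns can be pushed down toward the TableScan by the rule-based optimizer, while predicates on non-deterministic expressions such as rand() (see ppd_udf_col.q.out further below) remain in a Filter Operator above the Select that computes them.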
http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out b/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
new file mode 100644
index 0000000..b96a0e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/ppd_deterministic_expr.q.out
@@ -0,0 +1,553 @@
+PREHOOK: query: CREATE TABLE `testb`(
+   `cola` string COMMENT '',
+   `colb` string COMMENT '',
+   `colc` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+
+STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testb
+POSTHOOK: query: CREATE TABLE `testb`(
+   `cola` string COMMENT '',
+   `colb` string COMMENT '',
+   `colc` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+
+STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testb
+PREHOOK: query: CREATE TABLE `testa`(
+   `col1` string COMMENT '',
+   `col2` string COMMENT '',
+   `col3` string COMMENT '',
+   `col4` string COMMENT '',
+   `col5` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+STORED AS AVRO
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@testa
+POSTHOOK: query: CREATE TABLE `testa`(
+   `col1` string COMMENT '',
+   `col2` string COMMENT '',
+   `col3` string COMMENT '',
+   `col4` string COMMENT '',
+   `col5` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+STORED AS AVRO
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@testa
+PREHOOK: query: insert into testA partition (part1='US', part2='ABC', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=US/part2=ABC/part3=123
+POSTHOOK: query: insert into testA partition (part1='US', part2='ABC', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=US/part2=ABC/part3=123
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=ABC,part3=123).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='UK', part2='DEF', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=UK/part2=DEF/part3=123
+POSTHOOK: query: insert into testA partition (part1='UK', part2='DEF', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=UK/part2=DEF/part3=123
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=UK,part2=DEF,part3=123).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='US', part2='DEF', part3='200')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: query: insert into testA partition (part1='US', part2='DEF', part3='200')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=US,part2=DEF,part3=200).col5 SCRIPT []
+PREHOOK: query: insert into testA partition (part1='CA', part2='ABC', part3='300')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testa@part1=CA/part2=ABC/part3=300
+POSTHOOK: query: insert into testA partition (part1='CA', part2='ABC', part3='300')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testa@part1=CA/part2=ABC/part3=300
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col1 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col2 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col3 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col4 SCRIPT []
+POSTHOOK: Lineage: testa PARTITION(part1=CA,part2=ABC,part3=300).col5 SCRIPT []
+PREHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='300')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=CA/part2=ABC/part3=300
+POSTHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='300')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=CA/part2=ABC/part3=300
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=300).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='400')
+values ( '600', '700', 'abc'), ( '601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=CA/part2=ABC/part3=400
+POSTHOOK: query: insert into testB partition (part1='CA', part2='ABC', part3='400')
+values ( '600', '700', 'abc'), ( '601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=CA/part2=ABC/part3=400
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=CA,part2=ABC,part3=400).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='UK', part2='PQR', part3='500')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=UK/part2=PQR/part3=500
+POSTHOOK: query: insert into testB partition (part1='UK', part2='PQR', part3='500')
+values ('600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=UK/part2=PQR/part3=500
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=UK,part2=PQR,part3=500).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='US', part2='DEF', part3='200')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: query: insert into testB partition (part1='US', part2='DEF', part3='200')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=DEF,part3=200).colc SCRIPT []
+PREHOOK: query: insert into testB partition (part1='US', part2='PQR', part3='123')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@testb@part1=US/part2=PQR/part3=123
+POSTHOOK: query: insert into testB partition (part1='US', part2='PQR', part3='123')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@testb@part1=US/part2=PQR/part3=123
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).cola SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).colb SCRIPT []
+POSTHOOK: Lineage: testb PARTITION(part1=US,part2=PQR,part3=123).colc SCRIPT []
+PREHOOK: query: create view viewDeterministicUDFA partitioned on (vpart1, vpart2, vpart3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testa
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testa
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewDeterministicUDFA
+POSTHOOK: query: create view viewDeterministicUDFA partitioned on (vpart1, vpart2, vpart3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testa
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testa
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewDeterministicUDFA
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol1 EXPRESSION [(testa)testa.FieldSchema(name:col1, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol2 EXPRESSION [(testa)testa.FieldSchema(name:col2, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol3 EXPRESSION [(testa)testa.FieldSchema(name:col3, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol4 EXPRESSION [(testa)testa.FieldSchema(name:col4, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFA.vcol5 EXPRESSION [(testa)testa.FieldSchema(name:col5, type:string, comment:), ]
+PREHOOK: query: create view viewDeterministicUDFB partitioned on (vpart1, vpart2, vpart3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testb
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testb
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewDeterministicUDFB
+POSTHOOK: query: create view viewDeterministicUDFB partitioned on (vpart1, vpart2, vpart3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testb
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testb
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewDeterministicUDFB
+POSTHOOK: Lineage: viewDeterministicUDFB.vcola EXPRESSION [(testb)testb.FieldSchema(name:cola, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFB.vcolb EXPRESSION [(testb)testb.FieldSchema(name:colb, type:string, comment:), ]
+POSTHOOK: Lineage: viewDeterministicUDFB.vcolc EXPRESSION [(testb)testb.FieldSchema(name:colc, type:string, comment:), ]
+PREHOOK: query: create view viewNoUDFA partitioned on (part1, part2, part3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ part1,
+ part2,
+ part3
+ from testa
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testa
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewNoUDFA
+POSTHOOK: query: create view viewNoUDFA partitioned on (part1, part2, part3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ part1,
+ part2,
+ part3
+ from testa
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testa
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewNoUDFA
+POSTHOOK: Lineage: viewNoUDFA.vcol1 EXPRESSION [(testa)testa.FieldSchema(name:col1, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol2 EXPRESSION [(testa)testa.FieldSchema(name:col2, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol3 EXPRESSION [(testa)testa.FieldSchema(name:col3, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol4 EXPRESSION [(testa)testa.FieldSchema(name:col4, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFA.vcol5 EXPRESSION [(testa)testa.FieldSchema(name:col5, type:string, comment:), ]
+PREHOOK: query: create view viewNoUDFB partitioned on (part1, part2, part3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ part1,
+ part2,
+ part3
+ from testb
+where part1 in ('US', 'CA')
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@testb
+PREHOOK: Output: database:default
+PREHOOK: Output: default@viewNoUDFB
+POSTHOOK: query: create view viewNoUDFB partitioned on (part1, part2, part3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ part1,
+ part2,
+ part3
+ from testb
+where part1 in ('US', 'CA')
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@testb
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@viewNoUDFB
+POSTHOOK: Lineage: viewNoUDFB.vcola EXPRESSION [(testb)testb.FieldSchema(name:cola, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFB.vcolb EXPRESSION [(testb)testb.FieldSchema(name:colb, type:string, comment:), ]
+POSTHOOK: Lineage: viewNoUDFB.vcolc EXPRESSION [(testb)testb.FieldSchema(name:colc, type:string, comment:), ]
+PREHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testa
+            filterExpr: (part1) IN ('US', 'CA') (type: boolean)
+            Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( col1 AS decimal(38,18)) (type: decimal(38,18)), CAST( col2 AS decimal(38,18)) (type: decimal(38,18)), CAST( col3 AS decimal(38,18)) (type: decimal(38,18))
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: 'US' (type: char(2)), 'DEF' (type: char(3)), '200' (type: char(3))
+                sort order: +++
+                Map-reduce partition columns: 'US' (type: char(2)), 'DEF' (type: char(3)), '200' (type: char(3))
+                Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18))
+          TableScan
+            alias: testb
+            filterExpr: (part1) IN ('US', 'CA') (type: boolean)
+            Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( cola AS decimal(38,18)) (type: decimal(38,18)), CAST( colb AS decimal(38,18)) (type: decimal(38,18)), CAST( part1 AS CHAR(2)) (type: char(2)), CAST( part2 AS CHAR(3)) (type: char(3))
+              outputColumnNames: _col0, _col1, _col3, _col4
+              Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col3 (type: char(2)), _col4 (type: char(3)), '200' (type: char(3))
+                sort order: +++
+                Map-reduce partition columns: _col3 (type: char(2)), _col4 (type: char(3)), '200' (type: char(3))
+                Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col5 (type: char(2)), _col6 (type: char(3)), _col7 (type: char(3))
+            1 _col3 (type: char(2)), _col4 (type: char(3)), _col5 (type: char(3))
+          outputColumnNames: _col0, _col1, _col2, _col8, _col9
+          Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col8 (type: decimal(38,18)), _col9 (type: decimal(38,18))
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testa
+PREHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@testb
+PREHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@viewdeterministicudfa
+PREHOOK: Input: default@viewdeterministicudfb
+#### A masked pattern was here ####
+POSTHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testa
+POSTHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@testb
+POSTHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@viewdeterministicudfa
+POSTHOOK: Input: default@viewdeterministicudfb
+#### A masked pattern was here ####
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	601.000000000000000000	701.000000000000000000
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	600.000000000000000000	700.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	601.000000000000000000	701.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	600.000000000000000000	700.000000000000000000
+PREHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: testa
+            filterExpr: ((part1) IN ('US', 'CA') and (part1 = 'US') and (part2 = 'DEF') and (part3 = '200')) (type: boolean)
+            Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( col1 AS decimal(38,18)) (type: decimal(38,18)), CAST( col2 AS decimal(38,18)) (type: decimal(38,18)), CAST( col3 AS decimal(38,18)) (type: decimal(38,18))
+              outputColumnNames: _col0, _col1, _col2
+              Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: 'US' (type: string), 'DEF' (type: string), '200' (type: string)
+                sort order: +++
+                Map-reduce partition columns: 'US' (type: string), 'DEF' (type: string), '200' (type: string)
+                Statistics: Num rows: 2 Data size: 4580 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18))
+          TableScan
+            alias: testb
+            filterExpr: ((part1) IN ('US', 'CA') and (part3 = '200') and part1 is not null and part2 is not null) (type: boolean)
+            Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( cola AS decimal(38,18)) (type: decimal(38,18)), CAST( colb AS decimal(38,18)) (type: decimal(38,18)), part1 (type: string), part2 (type: string)
+              outputColumnNames: _col0, _col1, _col3, _col4
+              Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: _col3 (type: string), _col4 (type: string), '200' (type: string)
+                sort order: +++
+                Map-reduce partition columns: _col3 (type: string), _col4 (type: string), '200' (type: string)
+                Statistics: Num rows: 2 Data size: 3180 Basic stats: COMPLETE Column stats: NONE
+                value expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18))
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+          keys:
+            0 _col5 (type: string), _col6 (type: string), _col7 (type: string)
+            1 _col3 (type: string), _col4 (type: string), _col5 (type: string)
+          outputColumnNames: _col0, _col1, _col2, _col8, _col9
+          Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: decimal(38,18)), _col1 (type: decimal(38,18)), _col2 (type: decimal(38,18)), _col8 (type: decimal(38,18)), _col9 (type: decimal(38,18))
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4
+            Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 2 Data size: 5038 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@testa
+PREHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@testb
+PREHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+PREHOOK: Input: default@viewnoudfa
+PREHOOK: Input: default@viewnoudfb
+#### A masked pattern was here ####
+POSTHOOK: query: select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@testa
+POSTHOOK: Input: default@testa@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@testb
+POSTHOOK: Input: default@testb@part1=US/part2=DEF/part3=200
+POSTHOOK: Input: default@viewnoudfa
+POSTHOOK: Input: default@viewnoudfb
+#### A masked pattern was here ####
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	601.000000000000000000	701.000000000000000000
+12.341000000000000000	1001.000000000000000000	2001.000000000000000000	600.000000000000000000	700.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	601.000000000000000000	701.000000000000000000
+12.340000000000000000	100.000000000000000000	200.000000000000000000	600.000000000000000000	700.000000000000000000

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/ppd_udf_col.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/ppd_udf_col.q.out b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
index 8d858f5..dfc2d04 100644
--- a/ql/src/test/results/clientpositive/ppd_udf_col.q.out
+++ b/ql/src/test/results/clientpositive/ppd_udf_col.q.out
@@ -366,3 +366,412 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
+PREHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: ((_col2 <= 0.1) and (_col2 > 0.1)) (type: boolean)
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 20
+                      Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                      File Output Operator
+                        compressed: false
+                        Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                        table:
+                            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: false (type: boolean)
+              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), '4' (type: string)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: ((key = 100) and ((value * 10) <= 200.0D)) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((value * 10) <= 200.0D) and (key = 100)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), (value * 10) (type: double)
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: (key = 100) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (key = 100) (type: boolean)
+              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col2 <= 0.1) (type: boolean)
+                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double)
+                    outputColumnNames: _col0, _col1
+                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                    Filter Operator
+                      predicate: (_col1 > 0.1) (type: boolean)
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      Limit
+                        Number of rows: 20
+                        Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                        File Output Operator
+                          compressed: false
+                          Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE
+                          table:
+                              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: 20
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: ((key = 100) and false) (type: boolean)
+              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double)
+                outputColumnNames: _col0, _col2
+                Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: false (type: boolean)
+                  Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), '4' (type: string)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            filterExpr: ((key = 100) and ((value * 10) <= 200.0D)) (type: boolean)
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: (((value * 10) <= 200.0D) and (key = 100)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: key (type: string), rand() (type: double), (value * 10) (type: double)
+                outputColumnNames: _col0, _col2, _col3
+                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+                Filter Operator
+                  predicate: (_col3 <= 200.0D) (type: boolean)
+                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string), _col2 (type: double), _col3 (type: double)
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      Execution mode: vectorized
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/union_offcbo.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/union_offcbo.q.out b/ql/src/test/results/clientpositive/union_offcbo.q.out
index ab0e394..ce27bf2 100644
--- a/ql/src/test/results/clientpositive/union_offcbo.q.out
+++ b/ql/src/test/results/clientpositive/union_offcbo.q.out
@@ -288,21 +288,18 @@ STAGE PLANS:
           outputColumnNames: _col8, _col9, _col10, _col12, _col13, _col16, _col17, _col18, _col19
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and _col18 is not null) (type: boolean)
+            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and (CASE WHEN ((_col18 is not null and _col8 is null and (_col12 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col18 is not null and _col8 is null and (_col12 <= '2016-02-05'))) THEN ('RET') WHEN (((_col18 = _col8) and (_col19 <> _col9))) THEN ('A_INS') ELSE ('NA') END <> 'RET') and _col18 is not null) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col10 (type: bigint), _col16 (type: string), _col17 (type: bigint), _col13 (type: string), _col18 (type: string), _col19 (type: string), CASE WHEN ((_col18 is not null and _col8 is null and (_col12 >= '2016-02-05'))) THEN ('DEL') WHEN ((_col18 is not null and _col8 is null and (_col12 <= '2016-02-05'))) THEN ('RET') WHEN (((_col18 = _col8) and (_col19 <> _col9))) THEN ('A_INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col6, _col7
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Filter Operator
-                predicate: (_col7 <> 'RET') (type: boolean)
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-2
     Map Reduce
@@ -383,21 +380,18 @@ STAGE PLANS:
           outputColumnNames: _col0, _col1, _col6, _col7, _col8, _col9, _col11, _col18, _col19
           Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
           Filter Operator
-            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and _col8 is not null) (type: boolean)
+            predicate: (((NVL(_col8,-1) <> NVL(_col18,-1)) or (NVL(_col9,-1) <> NVL(_col19,-1))) and (CASE WHEN ((_col18 is not null and _col8 is null and (_col11 <= _col1))) THEN ('DEL') WHEN (((_col18 is null and _col8 is not null) or ((_col18 = _col8) and (_col19 <> _col9)))) THEN ('INS') ELSE ('NA') END <> 'RET') and _col8 is not null) (type: boolean)
             Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: bigint), _col6 (type: string), _col7 (type: bigint), '2099-12-31' (type: string), _col8 (type: string), _col9 (type: string), CASE WHEN ((_col18 is not null and _col8 is null and (_col11 <= _col1))) THEN ('DEL') WHEN (((_col18 is null and _col8 is not null) or ((_col18 = _col8) and (_col19 <> _col9)))) THEN ('INS') ELSE ('NA') END (type: string)
               outputColumnNames: _col0, _col1, _col2, _col4, _col5, _col6, _col7
               Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-              Filter Operator
-                predicate: (_col7 <> 'RET') (type: boolean)
-                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
   Stage: Stage-0
     Fetch Operator

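In the union_offcbo.q.out hunks above, the plan change is that the CASE WHEN ... END <> 'RET' test is folded into the preceding Filter Operator's predicate; the separate Filter Operator over _col7 that used to sit after the Select is removed, so the File Output Operator now follows the Select directly.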

[03/48] hive git commit: HIVE-20111: HBase-Hive (managed) table creation fails with strict managed table checks: Table is marked as a managed table but is not transactional (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
HIVE-20111: HBase-Hive (managed) table creation fails with strict managed table checks: Table is marked as a managed table but is not transactional (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3b88d6c1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3b88d6c1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3b88d6c1

Branch: refs/heads/master-txnstats
Commit: 3b88d6c1fd599ae3da8c781f41aee7dc2c195c83
Parents: 537c9cb
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Mon Jul 9 10:27:05 2018 -0700
Committer: Jesus Camacho Rodriguez <jc...@apache.org>
Committed: Fri Jul 13 21:38:19 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/hbase/HBaseMetaHook.java |  56 +++++++--------
 .../src/test/queries/negative/cascade_dbdrop.q  |   4 +-
 .../generatehfiles_require_family_path.q        |   5 +-
 .../src/test/queries/negative/hbase_ddl.q       |   4 +-
 .../test/queries/positive/external_table_ppd.q  |   5 +-
 .../test/queries/positive/hbase_binary_binary.q |   5 +-
 .../queries/positive/hbase_binary_map_queries.q |  13 ++--
 .../positive/hbase_binary_map_queries_prefix.q  |   9 +--
 .../positive/hbase_binary_storage_queries.q     |  12 ++--
 .../test/queries/positive/hbase_custom_key.q    |   5 +-
 .../test/queries/positive/hbase_custom_key2.q   |   5 +-
 .../test/queries/positive/hbase_custom_key3.q   |   5 +-
 .../src/test/queries/positive/hbase_ddl.q       |   4 +-
 .../queries/positive/hbase_decimal_decimal.q    |   5 +-
 .../test/queries/positive/hbase_handler_bulk.q  |  11 ++-
 .../src/test/queries/positive/hbase_joins.q     |  25 ++++---
 .../queries/positive/hbase_null_first_col.q     |   5 +-
 .../src/test/queries/positive/hbase_ppd_join.q  |   8 +--
 .../test/queries/positive/hbase_ppd_key_range.q |   5 +-
 .../src/test/queries/positive/hbase_pushdown.q  |   5 +-
 .../src/test/queries/positive/hbase_queries.q   |  54 +++++++++------
 .../test/queries/positive/hbase_scan_params.q   |   5 +-
 .../hbase_single_sourced_multi_insert.q         |   5 +-
 .../src/test/queries/positive/hbase_timestamp.q |  20 +++---
 .../queries/positive/hbase_timestamp_format.q   |   4 +-
 .../src/test/queries/positive/hbase_viewjoins.q |  10 +--
 .../src/test/queries/positive/hbasestats.q      |   5 +-
 .../src/test/queries/positive/ppd_key_ranges.q  |   5 +-
 .../test/results/negative/cascade_dbdrop.q.out  |   8 +--
 .../generatehfiles_require_family_path.q.out    |   6 +-
 .../src/test/results/negative/hbase_ddl.q.out   |   8 +--
 .../results/positive/external_table_ppd.q.out   |  14 ++--
 .../results/positive/hbase_binary_binary.q.out  |   6 +-
 .../positive/hbase_binary_map_queries.q.out     |  22 +++---
 .../hbase_binary_map_queries_prefix.q.out       |  14 ++--
 .../positive/hbase_binary_storage_queries.q.out |  28 +++++---
 .../results/positive/hbase_custom_key.q.out     |   6 +-
 .../results/positive/hbase_custom_key2.q.out    |   6 +-
 .../results/positive/hbase_custom_key3.q.out    |   6 +-
 .../src/test/results/positive/hbase_ddl.q.out   |  16 +++--
 .../positive/hbase_decimal_decimal.q.out        | Bin 1758 -> 1872 bytes
 .../results/positive/hbase_handler_bulk.q.out   |  16 ++---
 .../src/test/results/positive/hbase_joins.q.out |  30 +++++---
 .../results/positive/hbase_null_first_col.q.out |   6 +-
 .../test/results/positive/hbase_ppd_join.q.out  |  12 ++--
 .../results/positive/hbase_ppd_key_range.q.out  |   6 +-
 .../test/results/positive/hbase_pushdown.q.out  |   6 +-
 .../test/results/positive/hbase_queries.q.out   |  68 ++++++++++++-------
 .../results/positive/hbase_scan_params.q.out    |   6 +-
 .../hbase_single_sourced_multi_insert.q.out     |   6 +-
 .../test/results/positive/hbase_timestamp.q.out |  24 ++++---
 .../positive/hbase_timestamp_format.q.out       |   8 +--
 .../test/results/positive/hbase_viewjoins.q.out |  20 +++---
 .../src/test/results/positive/hbasestats.q.out  |  26 +++++--
 .../test/results/positive/ppd_key_ranges.q.out  |   6 +-
 55 files changed, 406 insertions(+), 278 deletions(-)
----------------------------------------------------------------------
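
A minimal, self-contained sketch of the purge rule that the HBaseMetaHook change below keys on. This is illustrative code, not the patched class: the class name is made up for the sketch, and it assumes external status is recorded in the standard "EXTERNAL" table parameter, while 'external.table.purge' is the property used throughout this patch.

  import java.util.HashMap;
  import java.util.Map;

  public class PurgeRuleSketch {

    // Purge rule from this patch: the backing HBase table is dropped together with
    // the Hive table when the Hive table is managed, or when it is EXTERNAL but
    // carries 'external.table.purge' = 'true'.
    static boolean shouldPurge(Map<String, String> tableParameters) {
      boolean isExternal = "TRUE".equalsIgnoreCase(tableParameters.get("EXTERNAL"));
      boolean externalPurge = "true".equalsIgnoreCase(tableParameters.get("external.table.purge"));
      return !isExternal || externalPurge;
    }

    public static void main(String[] args) {
      Map<String, String> registeredOnly = new HashMap<>();
      registeredOnly.put("EXTERNAL", "TRUE");
      // false: an external table merely registered over an existing HBase table keeps its data on DROP
      System.out.println(shouldPurge(registeredOnly));

      Map<String, String> createdFromHive = new HashMap<>(registeredOnly);
      createdFromHive.put("external.table.purge", "true");
      // true: external + purge behaves like the old managed case, the HBase table is deleted on DROP
      System.out.println(shouldPurge(createdFromHive));
    }
  }

The q-file updates below apply the same rule at the DDL level: the tests' managed HBase tables are recreated as CREATE EXTERNAL TABLE ... TBLPROPERTIES ("external.table.purge" = "true"), so dropping them still removes the underlying HBase table under the strict managed-table checks.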


http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
----------------------------------------------------------------------
diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
index 3565c8c..f01ed57 100644
--- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
+++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseMetaHook.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -44,6 +46,7 @@ import java.util.Set;
  * be used after usage.
  */
 public class HBaseMetaHook implements HiveMetaHook, Closeable {
+  private static final Logger LOG = LoggerFactory.getLogger(HBaseMetaHook.class);
   private Configuration hbaseConf;
   private Admin admin;
 
@@ -99,12 +102,15 @@ public class HBaseMetaHook implements HiveMetaHook, Closeable {
   public void commitDropTable(Table tbl, boolean deleteData) throws MetaException {
     try {
       String tableName = getHBaseTableName(tbl);
-      boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
-      if (deleteData && !isExternal) {
-        if (getHBaseAdmin().isTableEnabled(TableName.valueOf(tableName))) {
-          getHBaseAdmin().disableTable(TableName.valueOf(tableName));
+      boolean isPurge = !MetaStoreUtils.isExternalTable(tbl) || MetaStoreUtils.isExternalTablePurge(tbl);
+      if (deleteData && isPurge) {
+        LOG.info("Dropping with purge all the data for data source {}", tableName);
+        if (getHBaseAdmin().tableExists(TableName.valueOf(tableName))) {
+          if (getHBaseAdmin().isTableEnabled(TableName.valueOf(tableName))) {
+            getHBaseAdmin().disableTable(TableName.valueOf(tableName));
+          }
+          getHBaseAdmin().deleteTable(TableName.valueOf(tableName));
         }
-        getHBaseAdmin().deleteTable(TableName.valueOf(tableName));
       }
     } catch (IOException ie) {
       throw new MetaException(StringUtils.stringifyException(ie));
@@ -113,8 +119,6 @@ public class HBaseMetaHook implements HiveMetaHook, Closeable {
 
   @Override
   public void preCreateTable(Table tbl) throws MetaException {
-    boolean isExternal = MetaStoreUtils.isExternalTable(tbl);
-
     // We'd like to move this to HiveMetaStore for any non-native table, but
     // first we need to support storing NULL for location on a table
     if (tbl.getSd().getLocation() != null) {
@@ -133,34 +137,24 @@ public class HBaseMetaHook implements HiveMetaHook, Closeable {
       HTableDescriptor tableDesc;
 
       if (!getHBaseAdmin().tableExists(TableName.valueOf(tableName))) {
-        // if it is not an external table then create one
-        if (!isExternal) {
-          // Create the column descriptors
-          tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
-          Set<String> uniqueColumnFamilies = new HashSet<String>();
-
-          for (ColumnMappings.ColumnMapping colMap : columnMappings) {
-            if (!colMap.hbaseRowKey && !colMap.hbaseTimestamp) {
-              uniqueColumnFamilies.add(colMap.familyName);
-            }
-          }
+        // create table from Hive
+        // create the column descriptors
+        tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
+        Set<String> uniqueColumnFamilies = new HashSet<String>();
 
-          for (String columnFamily : uniqueColumnFamilies) {
-            tableDesc.addFamily(new HColumnDescriptor(Bytes.toBytes(columnFamily)));
+        for (ColumnMappings.ColumnMapping colMap : columnMappings) {
+          if (!colMap.hbaseRowKey && !colMap.hbaseTimestamp) {
+            uniqueColumnFamilies.add(colMap.familyName);
           }
+        }
 
-          getHBaseAdmin().createTable(tableDesc);
-        } else {
-          // an external table
-          throw new MetaException("HBase table " + tableName +
-              " doesn't exist while the table is declared as an external table.");
+        for (String columnFamily : uniqueColumnFamilies) {
+          tableDesc.addFamily(new HColumnDescriptor(Bytes.toBytes(columnFamily)));
         }
 
+        getHBaseAdmin().createTable(tableDesc);
       } else {
-        if (!isExternal) {
-          throw new MetaException("Table " + tableName + " already exists within HBase; "
-              + "use CREATE EXTERNAL TABLE instead to register it in Hive.");
-        }
+        // register table in Hive
         // make sure the schema mapping is right
         tableDesc = getHBaseAdmin().getTableDescriptor(TableName.valueOf(tableName));
 
@@ -190,10 +184,10 @@ public class HBaseMetaHook implements HiveMetaHook, Closeable {
 
   @Override
   public void rollbackCreateTable(Table table) throws MetaException {
-    boolean isExternal = MetaStoreUtils.isExternalTable(table);
     String tableName = getHBaseTableName(table);
+    boolean isPurge = !MetaStoreUtils.isExternalTable(table) || MetaStoreUtils.isExternalTablePurge(table);
     try {
-      if (!isExternal && getHBaseAdmin().tableExists(TableName.valueOf(tableName))) {
+      if (isPurge && getHBaseAdmin().tableExists(TableName.valueOf(tableName))) {
         // we have created an HBase table, so we delete it to roll back;
         if (getHBaseAdmin().isTableEnabled(TableName.valueOf(tableName))) {
           getHBaseAdmin().disableTable(TableName.valueOf(tableName));

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
index 266aa06..48be8cd 100644
--- a/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
+++ b/hbase-handler/src/test/queries/negative/cascade_dbdrop.q
@@ -10,10 +10,10 @@ CREATE DATABASE hbaseDB;
 -- Exit Code < 0 on syntax/usage error
 -- Exit Code > 0 operation failed
 
-CREATE TABLE hbaseDB.hbase_table_0(key int, value string)
+CREATE EXTERNAL TABLE hbaseDB.hbase_table_0(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true");
 
 dfs -ls target/tmp/hbase/data/default/hbase_table_0;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/negative/generatehfiles_require_family_path.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/generatehfiles_require_family_path.q b/hbase-handler/src/test/queries/negative/generatehfiles_require_family_path.q
index e6ff587..4e2f061 100644
--- a/hbase-handler/src/test/queries/negative/generatehfiles_require_family_path.q
+++ b/hbase-handler/src/test/queries/negative/generatehfiles_require_family_path.q
@@ -3,9 +3,10 @@
 
 DROP TABLE IF EXISTS hbase_bulk;
 
-CREATE TABLE hbase_bulk (key INT, value STRING)
+CREATE EXTERNAL TABLE hbase_bulk (key INT, value STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ('hbase.columns.mapping' = ':key,cf:string');
+WITH SERDEPROPERTIES ('hbase.columns.mapping' = ':key,cf:string')
+TBLPROPERTIES ("external.table.purge" = "true");
 
 SET hive.hbase.generatehfiles = true;
 INSERT OVERWRITE TABLE hbase_bulk SELECT * FROM src CLUSTER BY key;

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/negative/hbase_ddl.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/negative/hbase_ddl.q b/hbase-handler/src/test/queries/negative/hbase_ddl.q
index 2913bcd..24f684c 100644
--- a/hbase-handler/src/test/queries/negative/hbase_ddl.q
+++ b/hbase-handler/src/test/queries/negative/hbase_ddl.q
@@ -1,8 +1,8 @@
 DROP TABLE hbase_table_1;
-CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true");
 
 DESCRIBE EXTENDED hbase_table_1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/external_table_ppd.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/external_table_ppd.q b/hbase-handler/src/test/queries/positive/external_table_ppd.q
index c6f80cb..ed51667 100644
--- a/hbase-handler/src/test/queries/positive/external_table_ppd.q
+++ b/hbase-handler/src/test/queries/positive/external_table_ppd.q
@@ -1,7 +1,7 @@
 --! qt:dataset:src
 DROP TABLE t_hbase;
 
-CREATE TABLE t_hbase(key STRING,
+CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -12,7 +12,8 @@ CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:binarykey#-,cf:binarybyte#-,cf:binaryshort#-,:key#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary");
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true");
 
 DESCRIBE FORMATTED t_hbase;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_binary_binary.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_binary_binary.q b/hbase-handler/src/test/queries/positive/hbase_binary_binary.q
index 388e5aa..7c17ca2 100644
--- a/hbase-handler/src/test/queries/positive/hbase_binary_binary.q
+++ b/hbase-handler/src/test/queries/positive/hbase_binary_binary.q
@@ -1,9 +1,10 @@
 drop table if exists testhbaseb;
-CREATE TABLE testhbaseb (key int, val binary)
+CREATE EXTERNAL TABLE testhbaseb (key int, val binary)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:val#b"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 insert into table testhbaseb values(1, 'hello');
 insert into table testhbaseb values(2, 'hi');
 select * from testhbaseb;

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_binary_map_queries.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_binary_map_queries.q b/hbase-handler/src/test/queries/positive/hbase_binary_map_queries.q
index 7d9e16e..916918d 100644
--- a/hbase-handler/src/test/queries/positive/hbase_binary_map_queries.q
+++ b/hbase-handler/src/test/queries/positive/hbase_binary_map_queries.q
@@ -1,14 +1,15 @@
 --! qt:dataset:src
 DROP TABLE hbase_src;
 
-CREATE TABLE hbase_src(key STRING,
+CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
                        bigint_col BIGINT,
                        float_col FLOAT,
                        double_col DOUBLE,
-                       string_col STRING);
+                       string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_src
   SELECT key, key, key, key, key, key, key, value
@@ -17,7 +18,7 @@ INSERT OVERWRITE TABLE hbase_src
 
 DROP TABLE t_hbase_maps;
 
-CREATE TABLE t_hbase_maps(key STRING,
+CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           tinyint_map_col MAP<TINYINT, TINYINT>,
                           smallint_map_col MAP<SMALLINT, SMALLINT>,
                           int_map_col MAP<INT, INT>,
@@ -28,7 +29,7 @@ CREATE TABLE t_hbase_maps(key STRING,
                           boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-tinyint:,cf-smallint:,cf-int:,cf-bigint:,cf-float:,cf-double:,cf-string:,cf-boolean:")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps");
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE t_hbase_maps
   SELECT key,
@@ -119,7 +120,7 @@ DROP TABLE t_ext_hbase_maps_2;
 
 DROP TABLE t_hbase_maps_1;
 
-CREATE TABLE t_hbase_maps_1(key STRING,
+CREATE EXTERNAL TABLE t_hbase_maps_1(key STRING,
                             tinyint_map_col MAP<TINYINT, TINYINT>,
                             smallint_map_col MAP<SMALLINT, SMALLINT>,
                             int_map_col MAP<INT, INT>,
@@ -130,7 +131,7 @@ CREATE TABLE t_hbase_maps_1(key STRING,
                             boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key#b,cf-tinyint:#b:b,cf-smallint:#b:b,cf-int:#b:b,cf-bigint:#b:b,cf-float:#b:b,cf-double:#b:b,cf-string:#b:b,cf-boolean:#b:b")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1");
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1", "external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE t_hbase_maps_1
   SELECT key,

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_binary_map_queries_prefix.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_binary_map_queries_prefix.q b/hbase-handler/src/test/queries/positive/hbase_binary_map_queries_prefix.q
index c205210..c16c85a 100644
--- a/hbase-handler/src/test/queries/positive/hbase_binary_map_queries_prefix.q
+++ b/hbase-handler/src/test/queries/positive/hbase_binary_map_queries_prefix.q
@@ -1,14 +1,15 @@
 --! qt:dataset:src
 DROP TABLE hbase_src;
 
-CREATE TABLE hbase_src(key STRING,
+CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
                        bigint_col BIGINT,
                        float_col FLOAT,
                        double_col DOUBLE,
-                       string_col STRING);
+                       string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_src
   SELECT key, key, key, key, key, key, key, value
@@ -17,12 +18,12 @@ INSERT OVERWRITE TABLE hbase_src
 
 DROP TABLE t_hbase_maps;
 
-CREATE TABLE t_hbase_maps(key STRING,
+CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           string_map_col MAP<STRING, STRING>,
                           simple_string_col STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-string:,cf-string:simple_string_col")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps");
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE t_hbase_maps
   SELECT key,

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_binary_storage_queries.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_binary_storage_queries.q b/hbase-handler/src/test/queries/positive/hbase_binary_storage_queries.q
index 86ead91..02d6ae9 100644
--- a/hbase-handler/src/test/queries/positive/hbase_binary_storage_queries.q
+++ b/hbase-handler/src/test/queries/positive/hbase_binary_storage_queries.q
@@ -1,7 +1,7 @@
 --! qt:dataset:src
 DROP TABLE t_hbase;
 
-CREATE TABLE t_hbase(key STRING,
+CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -12,7 +12,8 @@ CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary");
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true");
 
 DESCRIBE FORMATTED t_hbase;
 
@@ -93,7 +94,7 @@ DROP TABLE t_hbase_1;
 DROP TABLE t_hbase;
 DROP TABLE t_hbase_2;
 
-CREATE TABLE t_hbase_2(key STRING,
+CREATE EXTERNAL TABLE t_hbase_2(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -103,7 +104,7 @@ CREATE TABLE t_hbase_2(key STRING,
                      boolean_col BOOLEAN)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
-TBLPROPERTIES ("hbase.table.name" = "t_hive_2");
+TBLPROPERTIES ("hbase.table.name" = "t_hive_2", "external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE t_hbase_2
 SELECT 'user1', 1, 1, 1, 1, 1.0, 1.0, true
@@ -192,7 +193,8 @@ STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES (
 "hbase.table.name" = "t_hive_2",
-"hbase.table.default.storage.type" = "binary");
+"hbase.table.default.storage.type" = "binary",
+"external.table.purge" = "true");
 
 SELECT * FROM t_hbase_4;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_custom_key.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_custom_key.q b/hbase-handler/src/test/queries/positive/hbase_custom_key.q
index 87fbf4a..4df8d48 100644
--- a/hbase-handler/src/test/queries/positive/hbase_custom_key.q
+++ b/hbase-handler/src/test/queries/positive/hbase_custom_key.q
@@ -1,10 +1,11 @@
 --! qt:dataset:src
-CREATE TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+CREATE EXTERNAL TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom",
     "hbase.columns.mapping" = ":key,cf:string",
-    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory");
+    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 CREATE EXTERNAL TABLE hbase_ck_2(key string, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_custom_key2.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_custom_key2.q b/hbase-handler/src/test/queries/positive/hbase_custom_key2.q
index 5116475..462365e 100644
--- a/hbase-handler/src/test/queries/positive/hbase_custom_key2.q
+++ b/hbase-handler/src/test/queries/positive/hbase_custom_key2.q
@@ -1,10 +1,11 @@
 --! qt:dataset:src
-CREATE TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
+CREATE EXTERNAL TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom2",
     "hbase.columns.mapping" = ":key,cf:string",
-    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory2");
+    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory2")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 from src tablesample (5 rows)
 insert into table hbase_ck_4 select

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_custom_key3.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_custom_key3.q b/hbase-handler/src/test/queries/positive/hbase_custom_key3.q
index 488a32f..106a263 100644
--- a/hbase-handler/src/test/queries/positive/hbase_custom_key3.q
+++ b/hbase-handler/src/test/queries/positive/hbase_custom_key3.q
@@ -1,10 +1,11 @@
 --! qt:dataset:src
-CREATE TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
+CREATE EXTERNAL TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom3",
     "hbase.columns.mapping" = ":key,cf:string",
-    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory3");
+    "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory3")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 from src tablesample (5 rows)
 insert into table hbase_ck_5 select

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_ddl.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_ddl.q b/hbase-handler/src/test/queries/positive/hbase_ddl.q
index dcec8d6..e339a68 100644
--- a/hbase-handler/src/test/queries/positive/hbase_ddl.q
+++ b/hbase-handler/src/test/queries/positive/hbase_ddl.q
@@ -1,9 +1,9 @@
 --! qt:dataset:src
 DROP TABLE hbase_table_1;
-CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true");
 
 DESCRIBE EXTENDED hbase_table_1;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_decimal_decimal.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_decimal_decimal.q b/hbase-handler/src/test/queries/positive/hbase_decimal_decimal.q
index d943fbd..016f10b 100644
--- a/hbase-handler/src/test/queries/positive/hbase_decimal_decimal.q
+++ b/hbase-handler/src/test/queries/positive/hbase_decimal_decimal.q
@@ -1,11 +1,12 @@
-CREATE TABLE testhbase_decimal (
+CREATE EXTERNAL TABLE testhbase_decimal (
 id int,
 balance decimal(15,2))
 ROW FORMAT DELIMITED
 COLLECTION ITEMS TERMINATED BY '~'
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
-"hbase.columns.mapping"=":key,cf:balance#b");
+"hbase.columns.mapping"=":key,cf:balance#b")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 insert into testhbase_decimal values (1,1), (2, 2.2), (3, 33.33);
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_handler_bulk.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_handler_bulk.q b/hbase-handler/src/test/queries/positive/hbase_handler_bulk.q
index 1eadfce..906fc38 100644
--- a/hbase-handler/src/test/queries/positive/hbase_handler_bulk.q
+++ b/hbase-handler/src/test/queries/positive/hbase_handler_bulk.q
@@ -4,10 +4,10 @@
 drop table if exists hb_target;
 
 -- this is the target HBase table
-create table hb_target(key int, val string)
+create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk');
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true');
 
 set hive.hbase.generatehfiles=true;
 set hfile.family.path=/tmp/hb_target/cf;
@@ -28,10 +28,10 @@ drop table hb_target;
 dfs -rmr /tmp/hb_target/cf;
 
 
-create table hb_target(key int, val string)
+create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk');
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true');
 
 -- do it twice - regression test for HIVE-18607
 
@@ -43,6 +43,3 @@ insert overwrite table hb_target select distinct key, value from src cluster by
 
 drop table hb_target;
 dfs -rmr /tmp/hb_target/cf;
-
-
-

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_joins.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_joins.q b/hbase-handler/src/test/queries/positive/hbase_joins.q
index 7be9f6a..816a756 100644
--- a/hbase-handler/src/test/queries/positive/hbase_joins.q
+++ b/hbase-handler/src/test/queries/positive/hbase_joins.q
@@ -6,23 +6,26 @@ DROP TABLE users_level;
 
 -- From HIVE-1257
 
-CREATE TABLE users(key string, state string, country string, country_id int)
+CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
-CREATE TABLE states(key string, name string)
+CREATE EXTERNAL TABLE states(key string, name string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "state:name"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
-CREATE TABLE countries(key string, name string, country string, country_id int)
+CREATE EXTERNAL TABLE countries(key string, name string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:name,info:country,info:country_id"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0
 FROM src WHERE key=100;
@@ -65,13 +68,15 @@ DROP TABLE users;
 DROP TABLE states;
 DROP TABLE countries;
 
-CREATE TABLE users(key int, userid int, username string, created int) 
+CREATE EXTERNAL TABLE users(key int, userid int, username string, created int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:nickname,f:created");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:nickname,f:created")
+TBLPROPERTIES ("external.table.purge" = "true");
 
-CREATE TABLE users_level(key int, userid int, level int)
+CREATE EXTERNAL TABLE users_level(key int, userid int, level int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:level");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:level")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 -- HIVE-1903:  the problem fixed here showed up even without any data,
 -- so no need to load any to test it

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_null_first_col.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_null_first_col.q b/hbase-handler/src/test/queries/positive/hbase_null_first_col.q
index 0d9ff56..5c5c311 100644
--- a/hbase-handler/src/test/queries/positive/hbase_null_first_col.q
+++ b/hbase-handler/src/test/queries/positive/hbase_null_first_col.q
@@ -4,11 +4,12 @@ DROP TABLE hbase_null;
 CREATE TABLE src_null(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE;
 LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE src_null;
 
-CREATE TABLE hbase_null(key string, col1 string, col2 string)
+CREATE EXTERNAL TABLE hbase_null(key string, col1 string, col2 string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf1:c1,cf1:c2"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 SELECT d, a, c FROM src_null;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_ppd_join.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_ppd_join.q b/hbase-handler/src/test/queries/positive/hbase_ppd_join.q
index b7e2a3b..b8e2bcb 100644
--- a/hbase-handler/src/test/queries/positive/hbase_ppd_join.q
+++ b/hbase-handler/src/test/queries/positive/hbase_ppd_join.q
@@ -5,11 +5,11 @@ drop table if exists hive1_tbl_data_hbase2;
 drop view if exists hive1_view_data_hbase1;
 drop view if exists hive1_view_data_hbase2;
 
-CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+CREATE EXTERNAL TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
-;
+TBLPROPERTIES ("external.table.purge" = "true");
 
 --create hive view for the above hive table 1
 CREATE VIEW hive1_view_data_hbase1 
@@ -26,11 +26,11 @@ AND COL_UPDATED_DATE IS NOT NULL
 insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100;
 
 --create hive hbase table 2
-CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+CREATE EXTERNAL TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
-;
+TBLPROPERTIES ("external.table.purge" = "true");
 
 --create hive view for the above hive hbase table 2
 CREATE VIEW hive1_view_data_hbase2 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_ppd_key_range.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_ppd_key_range.q b/hbase-handler/src/test/queries/positive/hbase_ppd_key_range.q
index cf1d1d4..b525d8c 100644
--- a/hbase-handler/src/test/queries/positive/hbase_ppd_key_range.q
+++ b/hbase-handler/src/test/queries/positive/hbase_ppd_key_range.q
@@ -1,8 +1,9 @@
 --! qt:dataset:src
 --! qt:dataset:part
-CREATE TABLE hbase_pushdown(key string, value string) 
+CREATE EXTERNAL TABLE hbase_pushdown(key string, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_pushdown 
 SELECT cast(key as string), value

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_pushdown.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_pushdown.q b/hbase-handler/src/test/queries/positive/hbase_pushdown.q
index 8e366af..74d5ad5 100644
--- a/hbase-handler/src/test/queries/positive/hbase_pushdown.q
+++ b/hbase-handler/src/test/queries/positive/hbase_pushdown.q
@@ -1,8 +1,9 @@
 --! qt:dataset:src
 --! qt:dataset:part
-CREATE TABLE hbase_pushdown(key int, value string) 
+CREATE EXTERNAL TABLE hbase_pushdown(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_pushdown 
 SELECT *

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_queries.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_queries.q b/hbase-handler/src/test/queries/positive/hbase_queries.q
index a4ea0c5..9ddecd5 100644
--- a/hbase-handler/src/test/queries/positive/hbase_queries.q
+++ b/hbase-handler/src/test/queries/positive/hbase_queries.q
@@ -1,9 +1,9 @@
 --! qt:dataset:src
 DROP TABLE hbase_table_1;
-CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0");
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true");
 
 DESCRIBE EXTENDED hbase_table_1;
 
@@ -53,9 +53,10 @@ ON (x.key = Y.key)
 ORDER BY key,value;
 
 DROP TABLE empty_hbase_table;
-CREATE TABLE empty_hbase_table(key int, value string) 
+CREATE EXTERNAL TABLE empty_hbase_table(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 DROP TABLE empty_normal_table;
 CREATE TABLE empty_normal_table(key int, value string);
@@ -65,11 +66,12 @@ select * from (select count(1) c from empty_normal_table union all select count(
 select * from (select count(1) c from src union all select count(1) as c from empty_hbase_table) x order by c;
 select * from (select count(1) c from src union all select count(1) as c from hbase_table_1) x order by c;
 
-CREATE TABLE hbase_table_3(key int, value string, count int) 
+CREATE EXTERNAL TABLE hbase_table_3(key int, value string, count int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:val,cf2:count"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 EXPLAIN 
 INSERT OVERWRITE TABLE hbase_table_3
@@ -93,11 +95,12 @@ select * from hbase_table_3 order by key, value limit 5;
 select key, count from hbase_table_3 order by key, count desc limit 5;
 
 DROP TABLE hbase_table_4;
-CREATE TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
+CREATE EXTERNAL TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_table_4 SELECT key, value, key+1, key+2 
 FROM src WHERE key=98 OR key=100;
@@ -113,22 +116,24 @@ TBLPROPERTIES ("hbase.table.name" = "hbase_table_4");
 SELECT * FROM hbase_table_5 ORDER BY key;
 
 DROP TABLE hbase_table_6;
-CREATE TABLE hbase_table_6(key int, value map<string,string>) 
+CREATE EXTERNAL TABLE hbase_table_6(key int, value map<string,string>) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 INSERT OVERWRITE TABLE hbase_table_6 SELECT key, map(value, key) FROM src
 WHERE key=98 OR key=100;
 
 SELECT * FROM hbase_table_6 ORDER BY key;
 
 DROP TABLE hbase_table_7;
-CREATE TABLE hbase_table_7(value map<string,string>, key int) 
+CREATE EXTERNAL TABLE hbase_table_7(value map<string,string>, key int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:,:key"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 INSERT OVERWRITE TABLE hbase_table_7 
 SELECT map(value, key, upper(value), key+1), key FROM src
 WHERE key=98 OR key=100;
@@ -138,11 +143,12 @@ SELECT * FROM hbase_table_7 ORDER BY key;
 set hive.hbase.wal.enabled=false;
 
 DROP TABLE hbase_table_8;
-CREATE TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
+CREATE EXTERNAL TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_table_8 SELECT key, value, key+1, key+2 
 FROM src WHERE key=98 OR key=100;
@@ -166,9 +172,10 @@ SELECT COUNT(*) FROM hbase_table_1_like;
 SHOW CREATE TABLE hbase_table_1_like;
 
 DROP TABLE IF EXISTS hbase_table_9;
-CREATE TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
+CREATE EXTERNAL TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col#s:s,cf:str_col");
+with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col#s:s,cf:str_col")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 insert overwrite table hbase_table_9 select 1 as id, map('abcd', null) as data , null as str from src limit 1;
 insert into table hbase_table_9 select 2 as id, map('efgh', null) as data , '1234' as str from src limit 1;
@@ -178,9 +185,10 @@ insert into table hbase_table_9 select 5 as id, map('key1',null, 'key2', 'avalue
 select * from hbase_table_9;
 
 DROP TABLE IF EXISTS hbase_table_10;
-CREATE TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
+CREATE EXTERNAL TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col");
+with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col")
+TBLPROPERTIES ("external.table.purge" = "true");
 set hive.cbo.enable=false;
 insert overwrite table hbase_table_10 select 1 as id, map(10, cast(null as int)) as data , null as str from src limit 1;
 insert into table hbase_table_10 select 2 as id, map(20, cast(null as int)) as data , '1234' as str from src limit 1;
@@ -191,16 +199,18 @@ select * from hbase_table_10;
 
 
 DROP TABLE IF EXISTS hbase_table_11;
-CREATE TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
+CREATE EXTERNAL TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id');
+WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true");
 INSERT INTO hbase_table_11 SELECT 2,NAMED_STRUCT("s_int",CAST(NULL AS INT),"s_string","s1","s_date",CAST('2018-03-12' AS DATE)) FROM src LIMIT 1;
 select * from hbase_table_11;
 
 DROP TABLE IF EXISTS hbase_table_12;
-CREATE TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
+CREATE EXTERNAL TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id');
+WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true");
 INSERT INTO hbase_table_12 SELECT 2, ARRAY("a", CAST (NULL AS STRING),  "b") FROM src LIMIT 1;
 select * from hbase_table_12;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_scan_params.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_scan_params.q b/hbase-handler/src/test/queries/positive/hbase_scan_params.q
index 522960c..d91a036 100644
--- a/hbase-handler/src/test/queries/positive/hbase_scan_params.q
+++ b/hbase-handler/src/test/queries/positive/hbase_scan_params.q
@@ -1,8 +1,9 @@
 --! qt:dataset:src
-CREATE TABLE hbase_pushdown(key int, value string)
+CREATE EXTERNAL TABLE hbase_pushdown(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string",
-"hbase.scan.cache" = "500", "hbase.scan.cacheblocks" = "true", "hbase.scan.batch" = "1");
+"hbase.scan.cache" = "500", "hbase.scan.cacheblocks" = "true", "hbase.scan.batch" = "1")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_pushdown SELECT * FROM src;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_single_sourced_multi_insert.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_single_sourced_multi_insert.q b/hbase-handler/src/test/queries/positive/hbase_single_sourced_multi_insert.q
index bd4672b..15bf118 100644
--- a/hbase-handler/src/test/queries/positive/hbase_single_sourced_multi_insert.q
+++ b/hbase-handler/src/test/queries/positive/hbase_single_sourced_multi_insert.q
@@ -1,9 +1,10 @@
 --! qt:dataset:src
 -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE
 CREATE TABLE src_x1(key string, value string);
-CREATE TABLE src_x2(key string, value string)
+CREATE EXTERNAL TABLE src_x2(key string, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:value");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:value")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 explain
 from src a

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_timestamp.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_timestamp.q b/hbase-handler/src/test/queries/positive/hbase_timestamp.q
index 46d7529..5972f49 100644
--- a/hbase-handler/src/test/queries/positive/hbase_timestamp.q
+++ b/hbase-handler/src/test/queries/positive/hbase_timestamp.q
@@ -1,23 +1,26 @@
 --! qt:dataset:src
 DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, `time` timestamp)
+CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` timestamp)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
+  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true");
 DESC extended hbase_table;
 FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, "2012-02-23 10:14:52" WHERE (key % 17) = 0;
 SELECT * FROM hbase_table;
 
 DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, `time` bigint)
+CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
+  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true");
 FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, value, 1329959754000 WHERE (key % 17) = 0;
 SELECT key, value, cast(`time` as timestamp) FROM hbase_table;
 
 DROP TABLE hbase_table;
-CREATE TABLE hbase_table (key string, value string, `time` bigint)
+CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp");
+  WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true");
 insert overwrite table hbase_table select key,value,ts FROM
 (
   select key, value, 100000000000 as ts from src WHERE (key % 33) = 0
@@ -42,9 +45,10 @@ SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AN
 SELECT key, value, cast(`time` as timestamp) FROM hbase_table WHERE key > 100 AND key < 400 AND `time` >= 200000000000;
 
 DROP TABLE hbase_table;
-CREATE TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
+CREATE EXTERNAL TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp")
+TBLPROPERTIES ("external.table.purge" = "true");
 FROM src INSERT OVERWRITE TABLE hbase_table SELECT key, MAP("name", CONCAT(value, " Jr")), "2012-02-23 10:14:52" WHERE (key % 17) = 0;
 FROM src INSERT INTO TABLE hbase_table SELECT key, MAP("age", '40'), "2015-12-12 12:12:12" WHERE (key % 17) = 0;
 FROM src INSERT INTO TABLE hbase_table SELECT key, MAP("name", value), "2000-01-01 01:01:01" WHERE (key % 17) = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_timestamp_format.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_timestamp_format.q b/hbase-handler/src/test/queries/positive/hbase_timestamp_format.q
index 75820e1..eb872f4 100644
--- a/hbase-handler/src/test/queries/positive/hbase_timestamp_format.q
+++ b/hbase-handler/src/test/queries/positive/hbase_timestamp_format.q
@@ -1,9 +1,9 @@
 --! qt:dataset:src
 
-create table hbase_str(rowkey string,mytime string,mystr string)
+create external table hbase_str(rowkey string,mytime string,mystr string)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ('hbase.columns.mapping' = 'm:mytime,m:mystr')
-  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts');
+  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts', 'external.table.purge' = 'true');
 
 describe hbase_str;
 insert overwrite table hbase_str select key, '2001-02-03-04.05.06.123456', value from src limit 3;

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbase_viewjoins.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbase_viewjoins.q b/hbase-handler/src/test/queries/positive/hbase_viewjoins.q
index 5c98903..f18a099 100644
--- a/hbase-handler/src/test/queries/positive/hbase_viewjoins.q
+++ b/hbase-handler/src/test/queries/positive/hbase_viewjoins.q
@@ -2,7 +2,7 @@ DROP VIEW IF EXISTS VIEW_HBASE_TABLE_TEST_2;
 DROP VIEW IF EXISTS VIEW_HBASE_TABLE_TEST_1;
 DROP TABLE IF EXISTS HBASE_TABLE_TEST_2;
 DROP TABLE IF EXISTS HBASE_TABLE_TEST_1;
-CREATE TABLE HBASE_TABLE_TEST_1(
+CREATE EXTERNAL TABLE HBASE_TABLE_TEST_1(
   cvalue string ,
   pk string,
  ccount int   )
@@ -17,11 +17,12 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_1',
-  'serialization.null.format'=''  );
+  'serialization.null.format'='',
+  'external.table.purge' = 'true');
 
 CREATE VIEW VIEW_HBASE_TABLE_TEST_1 AS SELECT hbase_table_test_1.cvalue,hbase_table_test_1.pk,hbase_table_test_1.ccount FROM hbase_table_test_1 WHERE hbase_table_test_1.ccount IS NOT NULL;
 
-CREATE TABLE HBASE_TABLE_TEST_2(
+CREATE EXTERNAL TABLE HBASE_TABLE_TEST_2(
   cvalue string ,
     pk string ,
    ccount int  )
@@ -36,7 +37,8 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_2',
-  'serialization.null.format'='');
+  'serialization.null.format'='',
+  'external.table.purge' = 'true');
 
 CREATE VIEW VIEW_HBASE_TABLE_TEST_2 AS SELECT hbase_table_test_2.cvalue,hbase_table_test_2.pk,hbase_table_test_2.ccount
 FROM hbase_table_test_2 WHERE  hbase_table_test_2.pk >='3-0000h-0' AND hbase_table_test_2.pk <= '3-0000h-g' AND

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/hbasestats.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/hbasestats.q b/hbase-handler/src/test/queries/positive/hbasestats.q
index 0185002..6398417 100644
--- a/hbase-handler/src/test/queries/positive/hbasestats.q
+++ b/hbase-handler/src/test/queries/positive/hbasestats.q
@@ -1,11 +1,12 @@
 --! qt:dataset:src
 DROP TABLE users;
 
-CREATE TABLE users(key string, state string, country string, country_id int)
+CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
-);
+)
+TBLPROPERTIES ("external.table.purge" = "true");
 
 desc formatted users;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/queries/positive/ppd_key_ranges.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/ppd_key_ranges.q b/hbase-handler/src/test/queries/positive/ppd_key_ranges.q
index 8b70c04..b47f467 100644
--- a/hbase-handler/src/test/queries/positive/ppd_key_ranges.q
+++ b/hbase-handler/src/test/queries/positive/ppd_key_ranges.q
@@ -1,7 +1,8 @@
 --! qt:dataset:src
-CREATE TABLE hbase_ppd_keyrange(key int, value string) 
+CREATE EXTERNAL TABLE hbase_ppd_keyrange(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
-WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#binary,cf:string");
+WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#binary,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true");
 
 INSERT OVERWRITE TABLE hbase_ppd_keyrange 
 SELECT *

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out b/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out
index cef7a06..803e35e 100644
--- a/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out
+++ b/hbase-handler/src/test/results/negative/cascade_dbdrop.q.out
@@ -4,17 +4,17 @@ PREHOOK: Output: database:hbaseDB
 POSTHOOK: query: CREATE DATABASE hbaseDB
 POSTHOOK: type: CREATEDATABASE
 POSTHOOK: Output: database:hbaseDB
-PREHOOK: query: CREATE TABLE hbaseDB.hbase_table_0(key int, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbaseDB.hbase_table_0(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:hbasedb
 PREHOOK: Output: hbaseDB@hbase_table_0
-POSTHOOK: query: CREATE TABLE hbaseDB.hbase_table_0(key int, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbaseDB.hbase_table_0(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:hbasedb
 POSTHOOK: Output: hbaseDB@hbase_table_0

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/negative/generatehfiles_require_family_path.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/negative/generatehfiles_require_family_path.q.out b/hbase-handler/src/test/results/negative/generatehfiles_require_family_path.q.out
index 52ac38f..aab4cf9 100644
--- a/hbase-handler/src/test/results/negative/generatehfiles_require_family_path.q.out
+++ b/hbase-handler/src/test/results/negative/generatehfiles_require_family_path.q.out
@@ -2,15 +2,17 @@ PREHOOK: query: DROP TABLE IF EXISTS hbase_bulk
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS hbase_bulk
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_bulk (key INT, value STRING)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_bulk (key INT, value STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping' = ':key,cf:string')
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_bulk
-POSTHOOK: query: CREATE TABLE hbase_bulk (key INT, value STRING)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_bulk (key INT, value STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping' = ':key,cf:string')
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_bulk

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/negative/hbase_ddl.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/negative/hbase_ddl.q.out b/hbase-handler/src/test/results/negative/hbase_ddl.q.out
index fde2fc5..e4c146b 100644
--- a/hbase-handler/src/test/results/negative/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/negative/hbase_ddl.q.out
@@ -2,17 +2,17 @@ PREHOOK: query: DROP TABLE hbase_table_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/external_table_ppd.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/external_table_ppd.q.out b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
index aed43cf..6832920 100644
--- a/hbase-handler/src/test/results/positive/external_table_ppd.q.out
+++ b/hbase-handler/src/test/results/positive/external_table_ppd.q.out
@@ -2,7 +2,7 @@ PREHOOK: query: DROP TABLE t_hbase
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -13,11 +13,12 @@ PREHOOK: query: CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:binarykey#-,cf:binarybyte#-,cf:binaryshort#-,:key#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary")
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase
-POSTHOOK: query: CREATE TABLE t_hbase(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -28,7 +29,8 @@ POSTHOOK: query: CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:binarykey#-,cf:binarybyte#-,cf:binaryshort#-,:key#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary")
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase
@@ -53,10 +55,12 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"bigint_col\":\"true\",\"boolean_col\":\"true\",\"double_col\":\"true\",\"float_col\":\"true\",\"int_col\":\"true\",\"key\":\"true\",\"smallint_col\":\"true\",\"tinyint_col\":\"true\"}}
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 	hbase.table.default.storage.type	binary              
 	hbase.table.name    	t_hive              
 	numFiles            	0                   

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out b/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
index e04227f..debe89c 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_binary.q.out
@@ -2,19 +2,21 @@ PREHOOK: query: drop table if exists testhbaseb
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table if exists testhbaseb
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE testhbaseb (key int, val binary)
+PREHOOK: query: CREATE EXTERNAL TABLE testhbaseb (key int, val binary)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:val#b"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@testhbaseb
-POSTHOOK: query: CREATE TABLE testhbaseb (key int, val binary)
+POSTHOOK: query: CREATE EXTERNAL TABLE testhbaseb (key int, val binary)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:val#b"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@testhbaseb

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_binary_map_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_map_queries.q.out b/hbase-handler/src/test/results/positive/hbase_binary_map_queries.q.out
index feaca4b..57fea2c 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_map_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_map_queries.q.out
@@ -2,7 +2,7 @@ PREHOOK: query: DROP TABLE hbase_src
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_src
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_src(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
@@ -10,10 +10,11 @@ PREHOOK: query: CREATE TABLE hbase_src(key STRING,
                        float_col FLOAT,
                        double_col DOUBLE,
                        string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_src
-POSTHOOK: query: CREATE TABLE hbase_src(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
@@ -21,6 +22,7 @@ POSTHOOK: query: CREATE TABLE hbase_src(key STRING,
                        float_col FLOAT,
                        double_col DOUBLE,
                        string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_src
@@ -50,7 +52,7 @@ PREHOOK: query: DROP TABLE t_hbase_maps
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase_maps
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           tinyint_map_col MAP<TINYINT, TINYINT>,
                           smallint_map_col MAP<SMALLINT, SMALLINT>,
                           int_map_col MAP<INT, INT>,
@@ -61,11 +63,11 @@ PREHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
                           boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-tinyint:,cf-smallint:,cf-int:,cf-bigint:,cf-float:,cf-double:,cf-string:,cf-boolean:")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase_maps
-POSTHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           tinyint_map_col MAP<TINYINT, TINYINT>,
                           smallint_map_col MAP<SMALLINT, SMALLINT>,
                           int_map_col MAP<INT, INT>,
@@ -76,7 +78,7 @@ POSTHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
                           boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-tinyint:,cf-smallint:,cf-int:,cf-bigint:,cf-float:,cf-double:,cf-string:,cf-boolean:")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase_maps
@@ -314,7 +316,7 @@ PREHOOK: query: DROP TABLE t_hbase_maps_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase_maps_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase_maps_1(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps_1(key STRING,
                             tinyint_map_col MAP<TINYINT, TINYINT>,
                             smallint_map_col MAP<SMALLINT, SMALLINT>,
                             int_map_col MAP<INT, INT>,
@@ -325,11 +327,11 @@ PREHOOK: query: CREATE TABLE t_hbase_maps_1(key STRING,
                             boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key#b,cf-tinyint:#b:b,cf-smallint:#b:b,cf-int:#b:b,cf-bigint:#b:b,cf-float:#b:b,cf-double:#b:b,cf-string:#b:b,cf-boolean:#b:b")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase_maps_1
-POSTHOOK: query: CREATE TABLE t_hbase_maps_1(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps_1(key STRING,
                             tinyint_map_col MAP<TINYINT, TINYINT>,
                             smallint_map_col MAP<SMALLINT, SMALLINT>,
                             int_map_col MAP<INT, INT>,
@@ -340,7 +342,7 @@ POSTHOOK: query: CREATE TABLE t_hbase_maps_1(key STRING,
                             boolean_map_col MAP<BOOLEAN, BOOLEAN>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key#b,cf-tinyint:#b:b,cf-smallint:#b:b,cf-int:#b:b,cf-bigint:#b:b,cf-float:#b:b,cf-double:#b:b,cf-string:#b:b,cf-boolean:#b:b")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps_1", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase_maps_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_binary_map_queries_prefix.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_map_queries_prefix.q.out b/hbase-handler/src/test/results/positive/hbase_binary_map_queries_prefix.q.out
index f6432b3..946d722 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_map_queries_prefix.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_map_queries_prefix.q.out
@@ -2,7 +2,7 @@ PREHOOK: query: DROP TABLE hbase_src
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_src
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_src(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
@@ -10,10 +10,11 @@ PREHOOK: query: CREATE TABLE hbase_src(key STRING,
                        float_col FLOAT,
                        double_col DOUBLE,
                        string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_src
-POSTHOOK: query: CREATE TABLE hbase_src(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_src(key STRING,
                        tinyint_col TINYINT,
                        smallint_col SMALLINT,
                        int_col INT,
@@ -21,6 +22,7 @@ POSTHOOK: query: CREATE TABLE hbase_src(key STRING,
                        float_col FLOAT,
                        double_col DOUBLE,
                        string_col STRING)
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_src
@@ -50,21 +52,21 @@ PREHOOK: query: DROP TABLE t_hbase_maps
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase_maps
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           string_map_col MAP<STRING, STRING>,
                           simple_string_col STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-string:,cf-string:simple_string_col")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase_maps
-POSTHOOK: query: CREATE TABLE t_hbase_maps(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase_maps(key STRING,
                           string_map_col MAP<STRING, STRING>,
                           simple_string_col STRING)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping"=":key,cf-string:,cf-string:simple_string_col")
-TBLPROPERTIES ("hbase.table.name"="t_hive_maps")
+TBLPROPERTIES ("hbase.table.name"="t_hive_maps", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase_maps

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
index 172db75..1209c88 100644
--- a/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_binary_storage_queries.q.out
@@ -2,7 +2,7 @@ PREHOOK: query: DROP TABLE t_hbase
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -13,11 +13,12 @@ PREHOOK: query: CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary")
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase
-POSTHOOK: query: CREATE TABLE t_hbase(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -28,7 +29,8 @@ POSTHOOK: query: CREATE TABLE t_hbase(key STRING,
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES ("hbase.table.name" = "t_hive",
-               "hbase.table.default.storage.type" = "binary")
+               "hbase.table.default.storage.type" = "binary",
+               "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase
@@ -53,10 +55,12 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"bigint_col\":\"true\",\"boolean_col\":\"true\",\"double_col\":\"true\",\"float_col\":\"true\",\"int_col\":\"true\",\"key\":\"true\",\"smallint_col\":\"true\",\"tinyint_col\":\"true\"}}
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 	hbase.table.default.storage.type	binary              
 	hbase.table.name    	t_hive              
 	numFiles            	0                   
@@ -338,7 +342,7 @@ PREHOOK: query: DROP TABLE t_hbase_2
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE t_hbase_2
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE t_hbase_2(key STRING,
+PREHOOK: query: CREATE EXTERNAL TABLE t_hbase_2(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -348,11 +352,11 @@ PREHOOK: query: CREATE TABLE t_hbase_2(key STRING,
                      boolean_col BOOLEAN)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
-TBLPROPERTIES ("hbase.table.name" = "t_hive_2")
+TBLPROPERTIES ("hbase.table.name" = "t_hive_2", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase_2
-POSTHOOK: query: CREATE TABLE t_hbase_2(key STRING,
+POSTHOOK: query: CREATE EXTERNAL TABLE t_hbase_2(key STRING,
                      tinyint_col TINYINT,
                      smallint_col SMALLINT,
                      int_col INT,
@@ -362,7 +366,7 @@ POSTHOOK: query: CREATE TABLE t_hbase_2(key STRING,
                      boolean_col BOOLEAN)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
-TBLPROPERTIES ("hbase.table.name" = "t_hive_2")
+TBLPROPERTIES ("hbase.table.name" = "t_hive_2", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase_2
@@ -586,7 +590,8 @@ STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES (
 "hbase.table.name" = "t_hive_2",
-"hbase.table.default.storage.type" = "binary")
+"hbase.table.default.storage.type" = "binary",
+"external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@t_hbase_4
@@ -602,7 +607,8 @@ STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#-,cf:binarybyte#-,cf:binaryshort#-,cf:binaryint#-,cf:binarylong#-,cf:binaryfloat#-,cf:binarydouble#-,cf:binaryboolean#-")
 TBLPROPERTIES (
 "hbase.table.name" = "t_hive_2",
-"hbase.table.default.storage.type" = "binary")
+"hbase.table.default.storage.type" = "binary",
+"external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@t_hbase_4

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_custom_key.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_custom_key.q.out b/hbase-handler/src/test/results/positive/hbase_custom_key.q.out
index e5bc947..bd3bfcf 100644
--- a/hbase-handler/src/test/results/positive/hbase_custom_key.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_custom_key.q.out
@@ -1,18 +1,20 @@
-PREHOOK: query: CREATE TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_ck_1
-POSTHOOK: query: CREATE TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_ck_1(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_ck_1

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out b/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out
index 5d381e7..8355a43 100644
--- a/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out
@@ -1,18 +1,20 @@
-PREHOOK: query: CREATE TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom2",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory2")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_ck_4
-POSTHOOK: query: CREATE TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_ck_4(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom2",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory2")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_ck_4

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out b/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out
index 60721cb..012e6f3 100644
--- a/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out
@@ -1,18 +1,20 @@
-PREHOOK: query: CREATE TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom3",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory3")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_ck_5
-POSTHOOK: query: CREATE TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_ck_5(key struct<col1:string,col2:string,col3:string>, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
     "hbase.table.name" = "hbase_custom3",
     "hbase.columns.mapping" = ":key,cf:string",
     "hbase.composite.key.factory"="org.apache.hadoop.hive.hbase.SampleHBaseKeyFactory3")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_ck_5

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_ddl.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ddl.q.out b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
index 31baba3..296353c 100644
--- a/hbase-handler/src/test/results/positive/hbase_ddl.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ddl.q.out
@@ -2,17 +2,17 @@ PREHOOK: query: DROP TABLE hbase_table_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_1
@@ -111,9 +111,11 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 	hbase.mapred.output.outputtable	kkk                 
 	hbase.table.name    	hbase_table_0       
 #### A masked pattern was here ####
@@ -158,9 +160,11 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 	hbase.table.name    	hbase_table_0       
 #### A masked pattern was here ####
 	numFiles            	0                   


[05/48] hive git commit: HIVE-20090: Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
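
For context on the plan diffs that follow: dynamic semijoin reduction builds min/max and bloom-filter values from one join side and pushes them into the other side's TableScan, which is why the scans below carry predicates of the form in_bloom_filter(..., DynamicValue(...)). A minimal, illustrative way to surface this in an EXPLAIN, assuming the usual configuration switch (the property name is recalled from Hive documentation, not taken from this patch), could be:

set hive.tez.dynamic.semijoin.reduction=true;
explain
select count(*)
from web_sales ws1
join customer_address ca on ws1.ws_ship_addr_sk = ca.ca_address_sk
where ca.ca_state = 'TX';
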
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query94.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query94.q.out b/ql/src/test/results/clientpositive/perf/tez/query94.q.out
index 5d19a16..396be11 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query94.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query94.q.out
@@ -76,22 +76,22 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 9 vectorized
-      File Output Operator [FS_174]
-        Limit [LIM_173] (rows=1 width=344)
+      File Output Operator [FS_176]
+        Limit [LIM_175] (rows=1 width=344)
           Number of rows:100
-          Select Operator [SEL_172] (rows=1 width=344)
+          Select Operator [SEL_174] (rows=1 width=344)
             Output:["_col0","_col1","_col2"]
           <-Reducer 8 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_171]
-              Select Operator [SEL_170] (rows=1 width=344)
+            SHUFFLE [RS_173]
+              Select Operator [SEL_172] (rows=1 width=344)
                 Output:["_col1","_col2","_col3"]
-                Group By Operator [GBY_169] (rows=1 width=344)
+                Group By Operator [GBY_171] (rows=1 width=344)
                   Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"]
                 <-Reducer 7 [CUSTOM_SIMPLE_EDGE] vectorized
-                  PARTITION_ONLY_SHUFFLE [RS_168]
-                    Group By Operator [GBY_167] (rows=1 width=344)
+                  PARTITION_ONLY_SHUFFLE [RS_170]
+                    Group By Operator [GBY_169] (rows=1 width=344)
                       Output:["_col0","_col1","_col2"],aggregations:["count(_col0)","sum(_col1)","sum(_col2)"]
-                      Group By Operator [GBY_166] (rows=115958879 width=135)
+                      Group By Operator [GBY_168] (rows=115958879 width=135)
                         Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                       <-Reducer 6 [SIMPLE_EDGE]
                         SHUFFLE [RS_74]
@@ -102,21 +102,21 @@ Stage-0
                               Output:["_col4","_col5","_col6"]
                               Filter Operator [FIL_41] (rows=115958879 width=135)
                                 predicate:_col14 is null
-                                Merge Join Operator [MERGEJOIN_128] (rows=231917759 width=135)
-                                  Conds:RS_38._col4=RS_165._col0(Left Outer),Output:["_col4","_col5","_col6","_col14"]
+                                Merge Join Operator [MERGEJOIN_130] (rows=231917759 width=135)
+                                  Conds:RS_38._col4=RS_167._col0(Left Outer),Output:["_col4","_col5","_col6","_col14"]
                                 <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
-                                  FORWARD [RS_165]
+                                  FORWARD [RS_167]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_164] (rows=7199233 width=92)
+                                    Select Operator [SEL_166] (rows=7199233 width=92)
                                       Output:["_col0","_col1"]
-                                      Group By Operator [GBY_163] (rows=7199233 width=92)
+                                      Group By Operator [GBY_165] (rows=7199233 width=92)
                                         Output:["_col0"],keys:KEY._col0
                                       <-Map 17 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_162]
+                                        SHUFFLE [RS_164]
                                           PartitionCols:_col0
-                                          Group By Operator [GBY_161] (rows=14398467 width=92)
+                                          Group By Operator [GBY_163] (rows=14398467 width=92)
                                             Output:["_col0"],keys:wr_order_number
-                                            Filter Operator [FIL_160] (rows=14398467 width=92)
+                                            Filter Operator [FIL_162] (rows=14398467 width=92)
                                               predicate:wr_order_number is not null
                                               TableScan [TS_25] (rows=14398467 width=92)
                                                 default@web_returns,wr1,Tbl:COMPLETE,Col:NONE,Output:["wr_order_number"]
@@ -125,101 +125,101 @@ Stage-0
                                     PartitionCols:_col4
                                     Select Operator [SEL_37] (rows=210834322 width=135)
                                       Output:["_col4","_col5","_col6"]
-                                      Merge Join Operator [MERGEJOIN_127] (rows=210834322 width=135)
-                                        Conds:RS_34._col4=RS_159._col0(Left Semi),Output:["_col3","_col4","_col5","_col6","_col14"],residual filter predicates:{(_col3 <> _col14)}
+                                      Merge Join Operator [MERGEJOIN_129] (rows=210834322 width=135)
+                                        Conds:RS_34._col4=RS_161._col0(Left Semi),Output:["_col3","_col4","_col5","_col6","_col14"],residual filter predicates:{(_col3 <> _col14)}
                                       <-Map 16 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_159]
+                                        SHUFFLE [RS_161]
                                           PartitionCols:_col0
-                                          Group By Operator [GBY_158] (rows=144002668 width=135)
+                                          Group By Operator [GBY_160] (rows=144002668 width=135)
                                             Output:["_col0","_col1"],keys:_col0, _col1
-                                            Select Operator [SEL_157] (rows=144002668 width=135)
+                                            Select Operator [SEL_159] (rows=144002668 width=135)
                                               Output:["_col0","_col1"]
-                                              Filter Operator [FIL_156] (rows=144002668 width=135)
+                                              Filter Operator [FIL_158] (rows=144002668 width=135)
                                                 predicate:(ws_order_number is not null and ws_warehouse_sk is not null)
                                                 TableScan [TS_22] (rows=144002668 width=135)
                                                   default@web_sales,ws2,Tbl:COMPLETE,Col:NONE,Output:["ws_warehouse_sk","ws_order_number"]
                                       <-Reducer 4 [SIMPLE_EDGE]
                                         SHUFFLE [RS_34]
                                           PartitionCols:_col4
-                                          Merge Join Operator [MERGEJOIN_126] (rows=191667562 width=135)
-                                            Conds:RS_18._col2=RS_147._col0(Inner),Output:["_col3","_col4","_col5","_col6"]
+                                          Merge Join Operator [MERGEJOIN_128] (rows=191667562 width=135)
+                                            Conds:RS_18._col2=RS_149._col0(Inner),Output:["_col3","_col4","_col5","_col6"]
                                           <-Map 14 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_147]
+                                            SHUFFLE [RS_149]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_146] (rows=42 width=1850)
+                                              Select Operator [SEL_148] (rows=42 width=1850)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_145] (rows=42 width=1850)
+                                                Filter Operator [FIL_147] (rows=42 width=1850)
                                                   predicate:((web_company_name = 'pri') and web_site_sk is not null)
                                                   TableScan [TS_9] (rows=84 width=1850)
                                                     default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_company_name"]
                                           <-Reducer 3 [SIMPLE_EDGE]
                                             SHUFFLE [RS_18]
                                               PartitionCols:_col2
-                                              Merge Join Operator [MERGEJOIN_125] (rows=174243235 width=135)
-                                                Conds:RS_15._col1=RS_139._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6"]
+                                              Merge Join Operator [MERGEJOIN_127] (rows=174243235 width=135)
+                                                Conds:RS_15._col1=RS_141._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6"]
                                               <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_139]
+                                                SHUFFLE [RS_141]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_138] (rows=20000000 width=1014)
+                                                  Select Operator [SEL_140] (rows=20000000 width=1014)
                                                     Output:["_col0"]
-                                                    Filter Operator [FIL_137] (rows=20000000 width=1014)
+                                                    Filter Operator [FIL_139] (rows=20000000 width=1014)
                                                       predicate:((ca_state = 'TX') and ca_address_sk is not null)
                                                       TableScan [TS_6] (rows=40000000 width=1014)
                                                         default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
                                               <-Reducer 2 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_15]
                                                   PartitionCols:_col1
-                                                  Merge Join Operator [MERGEJOIN_124] (rows=158402938 width=135)
-                                                    Conds:RS_155._col0=RS_131._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                  Merge Join Operator [MERGEJOIN_126] (rows=158402938 width=135)
+                                                    Conds:RS_157._col0=RS_133._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
                                                   <-Map 10 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_131]
+                                                    SHUFFLE [RS_133]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_130] (rows=8116 width=1119)
+                                                      Select Operator [SEL_132] (rows=8116 width=1119)
                                                         Output:["_col0"]
-                                                        Filter Operator [FIL_129] (rows=8116 width=1119)
+                                                        Filter Operator [FIL_131] (rows=8116 width=1119)
                                                           predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00' AND TIMESTAMP'1999-06-30 00:00:00' and d_date_sk is not null)
                                                           TableScan [TS_3] (rows=73049 width=1119)
                                                             default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                                   <-Map 1 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_155]
+                                                    SHUFFLE [RS_157]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_154] (rows=144002668 width=135)
+                                                      Select Operator [SEL_156] (rows=144002668 width=135)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_153] (rows=144002668 width=135)
+                                                        Filter Operator [FIL_155] (rows=144002668 width=135)
                                                           predicate:((ws_ship_addr_sk BETWEEN DynamicValue(RS_16_customer_address_ca_address_sk_min) AND DynamicValue(RS_16_customer_address_ca_address_sk_max) and in_bloom_filter(ws_ship_addr_sk, DynamicValue(RS_16_customer_address_ca_address_sk_bloom_filter))) and (ws_ship_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(ws_ship_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_19_web_site_web_site_sk_min) AND DynamicValue(RS_19_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_19_web_site_web_site_sk_bloom_filter))) and ws_order_number is not null and ws_ship_addr_sk is not null and ws_ship_date_sk is not null and ws_web_site_sk is not null)
                                                           TableScan [TS_0] (rows=144002668 width=135)
                                                             default@web_sales,ws1,Tbl:COMPLETE,Col:NONE,Output:["ws_ship_date_sk","ws_ship_addr_sk","ws_web_site_sk","ws_warehouse_sk","ws_order_number","ws_ext_ship_cost","ws_net_profit"]
                                                           <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_136]
-                                                              Group By Operator [GBY_135] (rows=1 width=12)
+                                                            BROADCAST [RS_138]
+                                                              Group By Operator [GBY_137] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_134]
-                                                                  Group By Operator [GBY_133] (rows=1 width=12)
+                                                                SHUFFLE [RS_136]
+                                                                  Group By Operator [GBY_135] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_132] (rows=8116 width=1119)
+                                                                    Select Operator [SEL_134] (rows=8116 width=1119)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_130]
+                                                                       Please refer to the previous Select Operator [SEL_132]
                                                           <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_144]
-                                                              Group By Operator [GBY_143] (rows=1 width=12)
+                                                            BROADCAST [RS_146]
+                                                              Group By Operator [GBY_145] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
                                                               <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_142]
-                                                                  Group By Operator [GBY_141] (rows=1 width=12)
+                                                                SHUFFLE [RS_144]
+                                                                  Group By Operator [GBY_143] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                                    Select Operator [SEL_140] (rows=20000000 width=1014)
+                                                                    Select Operator [SEL_142] (rows=20000000 width=1014)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_138]
+                                                                       Please refer to the previous Select Operator [SEL_140]
                                                           <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_152]
-                                                              Group By Operator [GBY_151] (rows=1 width=12)
+                                                            BROADCAST [RS_154]
+                                                              Group By Operator [GBY_153] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_150]
-                                                                  Group By Operator [GBY_149] (rows=1 width=12)
+                                                                SHUFFLE [RS_152]
+                                                                  Group By Operator [GBY_151] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_148] (rows=42 width=1850)
+                                                                    Select Operator [SEL_150] (rows=42 width=1850)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_146]
+                                                                       Please refer to the previous Select Operator [SEL_148]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query95.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query95.q.out b/ql/src/test/results/clientpositive/perf/tez/query95.q.out
index 400cc19..3a8ed09 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query95.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query95.q.out
@@ -63,22 +63,22 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 1 <- Reducer 10 (BROADCAST_EDGE), Reducer 12 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE)
-Map 19 <- Reducer 25 (BROADCAST_EDGE)
-Map 23 <- Reducer 25 (BROADCAST_EDGE)
+Map 1 <- Reducer 10 (BROADCAST_EDGE), Reducer 12 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE), Reducer 23 (BROADCAST_EDGE)
+Map 15 <- Reducer 23 (BROADCAST_EDGE)
+Map 21 <- Reducer 23 (BROADCAST_EDGE)
 Reducer 10 <- Map 9 (CUSTOM_SIMPLE_EDGE)
 Reducer 12 <- Map 11 (CUSTOM_SIMPLE_EDGE)
 Reducer 14 <- Map 13 (CUSTOM_SIMPLE_EDGE)
-Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 18 (SIMPLE_EDGE)
-Reducer 17 <- Reducer 16 (SIMPLE_EDGE)
+Reducer 16 <- Map 15 (SIMPLE_EDGE), Map 21 (SIMPLE_EDGE)
+Reducer 17 <- Map 22 (SIMPLE_EDGE), Reducer 16 (ONE_TO_ONE_EDGE)
+Reducer 18 <- Reducer 17 (SIMPLE_EDGE)
+Reducer 19 <- Map 15 (SIMPLE_EDGE), Map 21 (SIMPLE_EDGE)
 Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 9 (SIMPLE_EDGE)
-Reducer 20 <- Map 19 (SIMPLE_EDGE), Map 23 (SIMPLE_EDGE)
-Reducer 21 <- Map 24 (SIMPLE_EDGE), Reducer 20 (ONE_TO_ONE_EDGE)
-Reducer 22 <- Reducer 21 (SIMPLE_EDGE)
-Reducer 25 <- Map 24 (CUSTOM_SIMPLE_EDGE)
+Reducer 20 <- Reducer 19 (SIMPLE_EDGE)
+Reducer 23 <- Map 22 (CUSTOM_SIMPLE_EDGE)
 Reducer 3 <- Map 11 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
 Reducer 4 <- Map 13 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE)
-Reducer 5 <- Reducer 17 (ONE_TO_ONE_EDGE), Reducer 22 (ONE_TO_ONE_EDGE), Reducer 4 (SIMPLE_EDGE)
+Reducer 5 <- Reducer 18 (ONE_TO_ONE_EDGE), Reducer 20 (ONE_TO_ONE_EDGE), Reducer 4 (SIMPLE_EDGE)
 Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
 Reducer 7 <- Reducer 6 (CUSTOM_SIMPLE_EDGE)
 Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
@@ -88,208 +88,201 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 8 vectorized
-      File Output Operator [FS_273]
-        Limit [LIM_272] (rows=1 width=344)
+      File Output Operator [FS_286]
+        Limit [LIM_285] (rows=1 width=344)
           Number of rows:100
-          Select Operator [SEL_271] (rows=1 width=344)
+          Select Operator [SEL_284] (rows=1 width=344)
             Output:["_col0","_col1","_col2"]
           <-Reducer 7 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_270]
-              Select Operator [SEL_269] (rows=1 width=344)
+            SHUFFLE [RS_283]
+              Select Operator [SEL_282] (rows=1 width=344)
                 Output:["_col1","_col2","_col3"]
-                Group By Operator [GBY_268] (rows=1 width=344)
+                Group By Operator [GBY_281] (rows=1 width=344)
                   Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"]
                 <-Reducer 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                  PARTITION_ONLY_SHUFFLE [RS_267]
-                    Group By Operator [GBY_266] (rows=1 width=344)
+                  PARTITION_ONLY_SHUFFLE [RS_280]
+                    Group By Operator [GBY_279] (rows=1 width=344)
                       Output:["_col0","_col1","_col2"],aggregations:["count(_col0)","sum(_col1)","sum(_col2)"]
-                      Group By Operator [GBY_265] (rows=421668645 width=135)
+                      Group By Operator [GBY_278] (rows=421668645 width=135)
                         Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                       <-Reducer 5 [SIMPLE_EDGE]
                         SHUFFLE [RS_116]
                           PartitionCols:_col0
                           Group By Operator [GBY_115] (rows=421668645 width=135)
                             Output:["_col0","_col2","_col3"],aggregations:["sum(_col4)","sum(_col5)"],keys:_col3
-                            Merge Join Operator [MERGEJOIN_212] (rows=421668645 width=135)
-                              Conds:RS_58._col3=RS_247._col0(Inner),RS_58._col3=RS_264._col0(Inner),Output:["_col3","_col4","_col5"]
-                            <-Reducer 17 [ONE_TO_ONE_EDGE] vectorized
-                              FORWARD [RS_247]
+                            Merge Join Operator [MERGEJOIN_228] (rows=421668645 width=135)
+                              Conds:RS_58._col3=RS_277._col0(Inner),RS_58._col3=RS_275._col0(Inner),Output:["_col3","_col4","_col5"]
+                            <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_275]
                                 PartitionCols:_col0
-                                Group By Operator [GBY_246] (rows=79201469 width=135)
+                                Group By Operator [GBY_274] (rows=87121617 width=135)
                                   Output:["_col0"],keys:KEY._col0
-                                <-Reducer 16 [SIMPLE_EDGE]
-                                  SHUFFLE [RS_24]
-                                    PartitionCols:_col0
-                                    Group By Operator [GBY_23] (rows=158402938 width=135)
-                                      Output:["_col0"],keys:_col1
-                                      Select Operator [SEL_22] (rows=158402938 width=135)
-                                        Output:["_col1"]
-                                        Filter Operator [FIL_21] (rows=158402938 width=135)
-                                          predicate:(_col0 <> _col2)
-                                          Merge Join Operator [MERGEJOIN_209] (rows=158402938 width=135)
-                                            Conds:RS_242._col1=RS_245._col1(Inner),Output:["_col0","_col1","_col2"]
-                                          <-Map 15 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_242]
-                                              PartitionCols:_col1
-                                              Select Operator [SEL_241] (rows=144002668 width=135)
-                                                Output:["_col0","_col1"]
-                                                Filter Operator [FIL_240] (rows=144002668 width=135)
-                                                  predicate:ws_order_number is not null
-                                                  TableScan [TS_12] (rows=144002668 width=135)
-                                                    default@web_sales,ws1,Tbl:COMPLETE,Col:NONE,Output:["ws_warehouse_sk","ws_order_number"]
-                                          <-Map 18 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_245]
-                                              PartitionCols:_col1
-                                              Select Operator [SEL_244] (rows=144002668 width=135)
-                                                Output:["_col0","_col1"]
-                                                Filter Operator [FIL_243] (rows=144002668 width=135)
-                                                  predicate:ws_order_number is not null
-                                                  TableScan [TS_15] (rows=144002668 width=135)
-                                                    default@web_sales,ws2,Tbl:COMPLETE,Col:NONE,Output:["ws_warehouse_sk","ws_order_number"]
-                            <-Reducer 22 [ONE_TO_ONE_EDGE] vectorized
-                              FORWARD [RS_264]
-                                PartitionCols:_col0
-                                Group By Operator [GBY_263] (rows=87121617 width=135)
-                                  Output:["_col0"],keys:KEY._col0
-                                <-Reducer 21 [SIMPLE_EDGE]
+                                <-Reducer 17 [SIMPLE_EDGE]
                                   SHUFFLE [RS_46]
                                     PartitionCols:_col0
                                     Group By Operator [GBY_45] (rows=174243235 width=135)
                                       Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_211] (rows=174243235 width=135)
-                                        Conds:RS_41._col0=RS_250._col0(Inner),Output:["_col1"]
-                                      <-Map 24 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_250]
+                                      Merge Join Operator [MERGEJOIN_227] (rows=174243235 width=135)
+                                        Conds:RS_41._col0=RS_255._col0(Inner),Output:["_col1"]
+                                      <-Map 22 [SIMPLE_EDGE] vectorized
+                                        PARTITION_ONLY_SHUFFLE [RS_255]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_249] (rows=14398467 width=92)
+                                          Select Operator [SEL_254] (rows=14398467 width=92)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_248] (rows=14398467 width=92)
+                                            Filter Operator [FIL_253] (rows=14398467 width=92)
                                               predicate:wr_order_number is not null
                                               TableScan [TS_38] (rows=14398467 width=92)
                                                 default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_order_number"]
-                                      <-Reducer 20 [ONE_TO_ONE_EDGE]
+                                      <-Reducer 16 [ONE_TO_ONE_EDGE]
                                         FORWARD [RS_41]
                                           PartitionCols:_col0
                                           Select Operator [SEL_37] (rows=158402938 width=135)
                                             Output:["_col0"]
                                             Filter Operator [FIL_36] (rows=158402938 width=135)
                                               predicate:(_col0 <> _col2)
-                                              Merge Join Operator [MERGEJOIN_210] (rows=158402938 width=135)
-                                                Conds:RS_259._col1=RS_262._col1(Inner),Output:["_col0","_col1","_col2"]
-                                              <-Map 19 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_259]
+                                              Merge Join Operator [MERGEJOIN_226] (rows=158402938 width=135)
+                                                Conds:RS_268._col1=RS_272._col1(Inner),Output:["_col0","_col1","_col2"]
+                                              <-Map 15 [SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_268]
                                                   PartitionCols:_col1
-                                                  Select Operator [SEL_258] (rows=144002668 width=135)
+                                                  Select Operator [SEL_267] (rows=144002668 width=135)
                                                     Output:["_col0","_col1"]
-                                                    Filter Operator [FIL_257] (rows=144002668 width=135)
+                                                    Filter Operator [FIL_266] (rows=144002668 width=135)
                                                       predicate:((ws_order_number BETWEEN DynamicValue(RS_42_web_returns_wr_order_number_min) AND DynamicValue(RS_42_web_returns_wr_order_number_max) and in_bloom_filter(ws_order_number, DynamicValue(RS_42_web_returns_wr_order_number_bloom_filter))) and ws_order_number is not null)
                                                       TableScan [TS_27] (rows=144002668 width=135)
                                                         default@web_sales,ws1,Tbl:COMPLETE,Col:NONE,Output:["ws_warehouse_sk","ws_order_number"]
-                                                      <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_255]
-                                                          Group By Operator [GBY_254] (rows=1 width=12)
+                                                      <-Reducer 23 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_261]
+                                                          Group By Operator [GBY_259] (rows=1 width=12)
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=14398467)"]
-                                                          <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            PARTITION_ONLY_SHUFFLE [RS_253]
-                                                              Group By Operator [GBY_252] (rows=1 width=12)
+                                                          <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                            PARTITION_ONLY_SHUFFLE [RS_258]
+                                                              Group By Operator [GBY_257] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=14398467)"]
-                                                                Select Operator [SEL_251] (rows=14398467 width=92)
+                                                                Select Operator [SEL_256] (rows=14398467 width=92)
                                                                   Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_249]
-                                              <-Map 23 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_262]
+                                                                   Please refer to the previous Select Operator [SEL_254]
+                                              <-Map 21 [SIMPLE_EDGE] vectorized
+                                                SHUFFLE [RS_272]
                                                   PartitionCols:_col1
-                                                  Select Operator [SEL_261] (rows=144002668 width=135)
+                                                  Select Operator [SEL_271] (rows=144002668 width=135)
                                                     Output:["_col0","_col1"]
-                                                    Filter Operator [FIL_260] (rows=144002668 width=135)
+                                                    Filter Operator [FIL_270] (rows=144002668 width=135)
                                                       predicate:((ws_order_number BETWEEN DynamicValue(RS_42_web_returns_wr_order_number_min) AND DynamicValue(RS_42_web_returns_wr_order_number_max) and in_bloom_filter(ws_order_number, DynamicValue(RS_42_web_returns_wr_order_number_bloom_filter))) and ws_order_number is not null)
                                                       TableScan [TS_30] (rows=144002668 width=135)
                                                         default@web_sales,ws2,Tbl:COMPLETE,Col:NONE,Output:["ws_warehouse_sk","ws_order_number"]
-                                                      <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_256]
-                                                           Please refer to the previous Group By Operator [GBY_254]
+                                                      <-Reducer 23 [BROADCAST_EDGE] vectorized
+                                                        BROADCAST [RS_262]
+                                                           Please refer to the previous Group By Operator [GBY_259]
+                            <-Reducer 20 [ONE_TO_ONE_EDGE] vectorized
+                              FORWARD [RS_277]
+                                PartitionCols:_col0
+                                Group By Operator [GBY_276] (rows=79201469 width=135)
+                                  Output:["_col0"],keys:KEY._col0
+                                <-Reducer 19 [SIMPLE_EDGE]
+                                  SHUFFLE [RS_24]
+                                    PartitionCols:_col0
+                                    Group By Operator [GBY_23] (rows=158402938 width=135)
+                                      Output:["_col0"],keys:_col1
+                                      Select Operator [SEL_22] (rows=158402938 width=135)
+                                        Output:["_col1"]
+                                        Filter Operator [FIL_21] (rows=158402938 width=135)
+                                          predicate:(_col0 <> _col2)
+                                          Merge Join Operator [MERGEJOIN_225] (rows=158402938 width=135)
+                                            Conds:RS_269._col1=RS_273._col1(Inner),Output:["_col0","_col1","_col2"]
+                                          <-Map 15 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_269]
+                                              PartitionCols:_col1
+                                               Please refer to the previous Select Operator [SEL_267]
+                                          <-Map 21 [SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_273]
+                                              PartitionCols:_col1
+                                               Please refer to the previous Select Operator [SEL_271]
                             <-Reducer 4 [SIMPLE_EDGE]
                               SHUFFLE [RS_58]
                                 PartitionCols:_col3
-                                Merge Join Operator [MERGEJOIN_208] (rows=191667562 width=135)
-                                  Conds:RS_55._col2=RS_231._col0(Inner),Output:["_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_224] (rows=191667562 width=135)
+                                  Conds:RS_55._col2=RS_247._col0(Inner),Output:["_col3","_col4","_col5"]
                                 <-Map 13 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_231]
+                                  SHUFFLE [RS_247]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_230] (rows=42 width=1850)
+                                    Select Operator [SEL_246] (rows=42 width=1850)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_229] (rows=42 width=1850)
+                                      Filter Operator [FIL_245] (rows=42 width=1850)
                                         predicate:((web_company_name = 'pri') and web_site_sk is not null)
                                         TableScan [TS_9] (rows=84 width=1850)
                                           default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_company_name"]
                                 <-Reducer 3 [SIMPLE_EDGE]
                                   SHUFFLE [RS_55]
                                     PartitionCols:_col2
-                                    Merge Join Operator [MERGEJOIN_207] (rows=174243235 width=135)
-                                      Conds:RS_52._col1=RS_223._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
+                                    Merge Join Operator [MERGEJOIN_223] (rows=174243235 width=135)
+                                      Conds:RS_52._col1=RS_239._col0(Inner),Output:["_col2","_col3","_col4","_col5"]
                                     <-Map 11 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_223]
+                                      SHUFFLE [RS_239]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_222] (rows=20000000 width=1014)
+                                        Select Operator [SEL_238] (rows=20000000 width=1014)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_221] (rows=20000000 width=1014)
+                                          Filter Operator [FIL_237] (rows=20000000 width=1014)
                                             predicate:((ca_state = 'TX') and ca_address_sk is not null)
                                             TableScan [TS_6] (rows=40000000 width=1014)
                                               default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
                                     <-Reducer 2 [SIMPLE_EDGE]
                                       SHUFFLE [RS_52]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_206] (rows=158402938 width=135)
-                                          Conds:RS_239._col0=RS_215._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                        Merge Join Operator [MERGEJOIN_222] (rows=158402938 width=135)
+                                          Conds:RS_265._col0=RS_231._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_215]
+                                          SHUFFLE [RS_231]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_214] (rows=8116 width=1119)
+                                            Select Operator [SEL_230] (rows=8116 width=1119)
                                               Output:["_col0"]
-                                              Filter Operator [FIL_213] (rows=8116 width=1119)
+                                              Filter Operator [FIL_229] (rows=8116 width=1119)
                                                 predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1999-05-01 00:00:00' AND TIMESTAMP'1999-06-30 00:00:00' and d_date_sk is not null)
                                                 TableScan [TS_3] (rows=73049 width=1119)
                                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                         <-Map 1 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_239]
+                                          SHUFFLE [RS_265]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_238] (rows=144002668 width=135)
+                                            Select Operator [SEL_264] (rows=144002668 width=135)
                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                              Filter Operator [FIL_237] (rows=144002668 width=135)
-                                                predicate:((ws_ship_addr_sk BETWEEN DynamicValue(RS_53_customer_address_ca_address_sk_min) AND DynamicValue(RS_53_customer_address_ca_address_sk_max) and in_bloom_filter(ws_ship_addr_sk, DynamicValue(RS_53_customer_address_ca_address_sk_bloom_filter))) and (ws_ship_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ws_ship_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_56_web_site_web_site_sk_min) AND DynamicValue(RS_56_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_56_web_site_web_site_sk_bloom_filter))) and ws_order_number is not null and ws_ship_addr_sk is not null and ws_ship_date_sk is not null and ws_web_site_sk is not null)
+                                              Filter Operator [FIL_263] (rows=144002668 width=135)
+                                                predicate:((ws_order_number BETWEEN DynamicValue(RS_42_web_returns_wr_order_number_min) AND DynamicValue(RS_42_web_returns_wr_order_number_max) and in_bloom_filter(ws_order_number, DynamicValue(RS_42_web_returns_wr_order_number_bloom_filter))) and (ws_ship_addr_sk BETWEEN DynamicValue(RS_53_customer_address_ca_address_sk_min) AND DynamicValue(RS_53_customer_address_ca_address_sk_max) and in_bloom_filter(ws_ship_addr_sk, DynamicValue(RS_53_customer_address_ca_address_sk_bloom_filter))) and (ws_ship_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ws_ship_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_56_web_site_web_site_sk_min) AND DynamicValue(RS_56_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_56_web_site_web_site_sk_bloom_filter))) and ws_order_number is not null and ws_ship_addr_sk is not null and ws_ship_date_sk is not null and ws_web_site_sk is not null)
                                                 TableScan [TS_0] (rows=144002668 width=135)
                                                   default@web_sales,ws1,Tbl:COMPLETE,Col:NONE,Output:["ws_ship_date_sk","ws_ship_addr_sk","ws_web_site_sk","ws_order_number","ws_ext_ship_cost","ws_net_profit"]
+                                                <-Reducer 23 [BROADCAST_EDGE] vectorized
+                                                  BROADCAST [RS_260]
+                                                     Please refer to the previous Group By Operator [GBY_259]
                                                 <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_220]
-                                                    Group By Operator [GBY_219] (rows=1 width=12)
+                                                  BROADCAST [RS_236]
+                                                    Group By Operator [GBY_235] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_218]
-                                                        Group By Operator [GBY_217] (rows=1 width=12)
+                                                      SHUFFLE [RS_234]
+                                                        Group By Operator [GBY_233] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_216] (rows=8116 width=1119)
+                                                          Select Operator [SEL_232] (rows=8116 width=1119)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_214]
+                                                             Please refer to the previous Select Operator [SEL_230]
                                                 <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_228]
-                                                    Group By Operator [GBY_227] (rows=1 width=12)
+                                                  BROADCAST [RS_244]
+                                                    Group By Operator [GBY_243] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
                                                     <-Map 11 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_226]
-                                                        Group By Operator [GBY_225] (rows=1 width=12)
+                                                      SHUFFLE [RS_242]
+                                                        Group By Operator [GBY_241] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                          Select Operator [SEL_224] (rows=20000000 width=1014)
+                                                          Select Operator [SEL_240] (rows=20000000 width=1014)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_222]
+                                                             Please refer to the previous Select Operator [SEL_238]
                                                 <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_236]
-                                                    Group By Operator [GBY_235] (rows=1 width=12)
+                                                  BROADCAST [RS_252]
+                                                    Group By Operator [GBY_251] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_234]
-                                                        Group By Operator [GBY_233] (rows=1 width=12)
+                                                      SHUFFLE [RS_250]
+                                                        Group By Operator [GBY_249] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_232] (rows=42 width=1850)
+                                                          Select Operator [SEL_248] (rows=42 width=1850)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_230]
+                                                             Please refer to the previous Select Operator [SEL_246]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
index eafc1c4..a141409 100644
--- a/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
+++ b/ql/src/test/results/clientpositive/spark/spark_dynamic_partition_pruning_3.q.out
@@ -366,7 +366,7 @@ STAGE PLANS:
                           outputColumnNames: _col0
                           Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                           Spark Partition Pruning Sink Operator
-                            Target Columns: [Map 4 -> [part_col:int (part_col)]]
+                            Target Columns: [Map 1 -> [part_col:int (part_col)], Map 4 -> [part_col:int (part_col)]]
                             Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
             Local Work:
               Map Reduce Local Work
@@ -432,7 +432,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: partitioned_table1
-                  filterExpr: (part_col > 1) (type: boolean)
                   Statistics: Num rows: 12 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: part_col (type: int)


[38/48] hive git commit: HIVE-19486: Discrepancy in HikariCP config naming (Antal Sinkovits via Adam Szita)

Posted by se...@apache.org.
HIVE-19486: Discrepancy in HikariCP config naming (Antal Sinkovits via Adam Szita)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dceeefbd
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dceeefbd
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dceeefbd

Branch: refs/heads/master-txnstats
Commit: dceeefbdf5e4f6fea83cb6ca5c11fbac10e77677
Parents: 3e02354
Author: Antal Sinkovits <as...@cloudera.com>
Authored: Tue Jul 17 10:45:54 2018 +0200
Committer: Adam Szita <sz...@cloudera.com>
Committed: Tue Jul 17 11:25:51 2018 +0200

----------------------------------------------------------------------
 common/src/java/org/apache/hadoop/hive/conf/HiveConf.java          | 2 +-
 .../src/test/java/org/apache/hive/jdbc/TestRestrictedList.java     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/dceeefbd/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 858c630..4ed1636 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -4358,7 +4358,7 @@ public class HiveConf extends Configuration {
             "bonecp.,"+
             "hive.druid.broker.address.default,"+
             "hive.druid.coordinator.address.default,"+
-            "hikari.,"+
+            "hikaricp.,"+
             "hadoop.bin.path,"+
             "yarn.bin.path,"+
             "spark.home",

http://git-wip-us.apache.org/repos/asf/hive/blob/dceeefbd/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
index cb005bf..f3f50a6e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
@@ -95,7 +95,7 @@ public class TestRestrictedList {
     addToExpectedRestrictedMap("bonecp.test");
     addToExpectedRestrictedMap("hive.druid.broker.address.default");
     addToExpectedRestrictedMap("hive.druid.coordinator.address.default");
-    addToExpectedRestrictedMap("hikari.test");
+    addToExpectedRestrictedMap("hikaricp.test");
     addToExpectedRestrictedMap("hadoop.bin.path");
     addToExpectedRestrictedMap("yarn.bin.path");
     addToExpectedRestrictedMap("hive.spark.client.connect.timeout");


[31/48] hive git commit: HIVE-20174: Vectorization: Fix NULL / Wrong Results issues in GROUP BY Aggregation Functions (Matt McCline, reviewed by Teddy Choi)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
index a503445..7f2a18a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64.java
@@ -164,15 +164,9 @@ public class VectorUDAFSumDecimal64 extends VectorAggregateExpression {
       }
     } else {
       if (inputVector.isRepeating) {
-        if (batch.selectedInUse) {
-          iterateHasNullsRepeatingSelectionWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0], batchSize, batch.selected, inputVector.isNull);
-        } else {
-          iterateHasNullsRepeatingWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0], batchSize, inputVector.isNull);
-        }
+        iterateHasNullsRepeatingWithAggregationSelection(
+          aggregationBufferSets, aggregateIndex,
+          vector[0], batchSize, inputVector.isNull);
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(
@@ -232,28 +226,6 @@ public class VectorUDAFSumDecimal64 extends VectorAggregateExpression {
     }
   }
 
-  private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-    VectorAggregationBufferRow[] aggregationBufferSets,
-    int aggregateIndex,
-    long value,
-    int batchSize,
-    int[] selection,
-    boolean[] isNull) {
-
-    if (isNull[0]) {
-      return;
-    }
-
-    for (int i=0; i < batchSize; ++i) {
-      Aggregation myagg = getCurrentAggregationBuffer(
-        aggregationBufferSets,
-        aggregateIndex,
-        i);
-      myagg.sumValue(value);
-    }
-
-  }
-
   private void iterateHasNullsRepeatingWithAggregationSelection(
     VectorAggregationBufferRow[] aggregationBufferSets,
     int aggregateIndex,
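
The variant deleted here (and in the two sibling classes below) duplicated the surviving repeating path: when a column vector is repeating, every row carries vector[0] and isNull[0], so the selected[] array never influences the value being accumulated and the plain loop over batch positions is all that remains. A standalone sketch of that property, not Hive's code:

  // Sketch only: with isRepeating == true, the selection vector cannot change
  // the value being aggregated, so the "selected" and "unselected" loops are
  // equivalent and only the row count matters.
  public final class RepeatingVectorDemo {
    public static long sum(long[] vector, boolean isRepeating, boolean[] isNull,
                           boolean selectedInUse, int[] selected, int batchSize) {
      if (isRepeating) {
        // One logical value for the whole batch; selection only picks rows,
        // and every row holds that same value.
        return isNull[0] ? 0L : vector[0] * batchSize;
      }
      long total = 0L;
      for (int i = 0; i < batchSize; i++) {
        int row = selectedInUse ? selected[i] : i;
        if (!isNull[row]) {
          total += vector[row];
        }
      }
      return total;
    }

    public static void main(String[] args) {
      long[] v = {7L};                 // repeating: only slot 0 is meaningful
      boolean[] nulls = {false};
      System.out.println(sum(v, true, nulls, true, new int[]{0, 0, 0}, 3)); // 21
    }
  }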

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
index 117611e..a02bdf3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal64ToDecimal.java
@@ -189,15 +189,9 @@ public class VectorUDAFSumDecimal64ToDecimal extends VectorAggregateExpression {
       }
     } else {
       if (inputVector.isRepeating) {
-        if (batch.selectedInUse) {
-          iterateHasNullsRepeatingSelectionWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0], batchSize, batch.selected, inputVector.isNull);
-        } else {
-          iterateHasNullsRepeatingWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0], batchSize, inputVector.isNull);
-        }
+        iterateHasNullsRepeatingWithAggregationSelection(
+          aggregationBufferSets, aggregateIndex,
+          vector[0], batchSize, inputVector.isNull);
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(
@@ -257,28 +251,6 @@ public class VectorUDAFSumDecimal64ToDecimal extends VectorAggregateExpression {
     }
   }
 
-  private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-    VectorAggregationBufferRow[] aggregationBufferSets,
-    int aggregateIndex,
-    long value,
-    int batchSize,
-    int[] selection,
-    boolean[] isNull) {
-
-    if (isNull[0]) {
-      return;
-    }
-
-    for (int i=0; i < batchSize; ++i) {
-      Aggregation myagg = getCurrentAggregationBuffer(
-        aggregationBufferSets,
-        aggregateIndex,
-        i);
-      myagg.sumValue(value);
-    }
-
-  }
-
   private void iterateHasNullsRepeatingWithAggregationSelection(
     VectorAggregationBufferRow[] aggregationBufferSets,
     int aggregateIndex,

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
index e542033..731a143 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumTimestamp.java
@@ -131,15 +131,9 @@ public class VectorUDAFSumTimestamp extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              inputVector.getDouble(0), batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              inputVector.getDouble(0), batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregateIndex,
+            inputVector.getDouble(0), batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -199,28 +193,6 @@ public class VectorUDAFSumTimestamp extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      double value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          aggregateIndex,
-          i);
-        myagg.sumValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 7afbf04..7ec80e6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -4183,7 +4183,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       AggregationDesc aggrDesc, VectorizationContext vContext) throws HiveException {
 
     String aggregateName = aggrDesc.getGenericUDAFName();
-    ArrayList<ExprNodeDesc> parameterList = aggrDesc.getParameters();
+    List<ExprNodeDesc> parameterList = aggrDesc.getParameters();
     final int parameterCount = parameterList.size();
     final GenericUDAFEvaluator.Mode udafEvaluatorMode = aggrDesc.getMode();
 
@@ -4192,10 +4192,9 @@ public class Vectorizer implements PhysicalPlanResolver {
      */
     GenericUDAFEvaluator evaluator = aggrDesc.getGenericUDAFEvaluator();
 
-    ArrayList<ExprNodeDesc> parameters = aggrDesc.getParameters();
     ObjectInspector[] parameterObjectInspectors = new ObjectInspector[parameterCount];
     for (int i = 0; i < parameterCount; i++) {
-      TypeInfo typeInfo = parameters.get(i).getTypeInfo();
+      TypeInfo typeInfo = parameterList.get(i).getTypeInfo();
       parameterObjectInspectors[i] = TypeInfoUtils
           .getStandardWritableObjectInspectorFromTypeInfo(typeInfo);
     }
@@ -4207,18 +4206,30 @@ public class Vectorizer implements PhysicalPlanResolver {
             aggrDesc.getMode(),
             parameterObjectInspectors);
 
+    final TypeInfo outputTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(returnOI.getTypeName());
+
+    return getVectorAggregationDesc(
+        aggregateName, parameterList, evaluator, outputTypeInfo, udafEvaluatorMode, vContext);
+  }
+
+  public static ImmutablePair<VectorAggregationDesc,String> getVectorAggregationDesc(
+      String aggregationName, List<ExprNodeDesc> parameterList,
+      GenericUDAFEvaluator evaluator, TypeInfo outputTypeInfo,
+      GenericUDAFEvaluator.Mode udafEvaluatorMode,
+      VectorizationContext vContext)
+          throws HiveException {
+
     VectorizedUDAFs annotation =
         AnnotationUtils.getAnnotation(evaluator.getClass(), VectorizedUDAFs.class);
     if (annotation == null) {
       String issue =
           "Evaluator " + evaluator.getClass().getSimpleName() + " does not have a " +
-          "vectorized UDAF annotation (aggregation: \"" + aggregateName + "\"). " +
+          "vectorized UDAF annotation (aggregation: \"" + aggregationName + "\"). " +
           "Vectorization not supported";
       return new ImmutablePair<VectorAggregationDesc,String>(null, issue);
     }
     final Class<? extends VectorAggregateExpression>[] vecAggrClasses = annotation.value();
 
-    final TypeInfo outputTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(returnOI.getTypeName());
 
     // Not final since it may change later due to DECIMAL_64.
     ColumnVector.Type outputColVectorType =
@@ -4233,6 +4244,7 @@ public class Vectorizer implements PhysicalPlanResolver {
     VectorExpression inputExpression;
     ColumnVector.Type inputColVectorType;
 
+    final int parameterCount = parameterList.size();
     if (parameterCount == 0) {
 
       // COUNT(*)
@@ -4246,7 +4258,7 @@ public class Vectorizer implements PhysicalPlanResolver {
       inputTypeInfo = exprNodeDesc.getTypeInfo();
       if (inputTypeInfo == null) {
         String issue ="Aggregations with null parameter type not supported " +
-            aggregateName + "(" + parameterList.toString() + ")";
+            aggregationName + "(" + parameterList.toString() + ")";
         return new ImmutablePair<VectorAggregationDesc,String>(null, issue);
       }
 
@@ -4260,12 +4272,12 @@ public class Vectorizer implements PhysicalPlanResolver {
               exprNodeDesc, VectorExpressionDescriptor.Mode.PROJECTION);
       if (inputExpression == null) {
         String issue ="Parameter expression " + exprNodeDesc.toString() + " not supported " +
-            aggregateName + "(" + parameterList.toString() + ")";
+            aggregationName + "(" + parameterList.toString() + ")";
         return new ImmutablePair<VectorAggregationDesc,String>(null, issue);
       }
       if (inputExpression.getOutputTypeInfo() == null) {
         String issue ="Parameter expression " + exprNodeDesc.toString() + " with null type not supported " +
-            aggregateName + "(" + parameterList.toString() + ")";
+            aggregationName + "(" + parameterList.toString() + ")";
         return new ImmutablePair<VectorAggregationDesc,String>(null, issue);
       }
       inputColVectorType = inputExpression.getOutputColumnVectorType();
@@ -4273,7 +4285,7 @@ public class Vectorizer implements PhysicalPlanResolver {
 
       // No multi-parameter aggregations supported.
       String issue ="Aggregations with > 1 parameter are not supported " +
-          aggregateName + "(" + parameterList.toString() + ")";
+          aggregationName + "(" + parameterList.toString() + ")";
       return new ImmutablePair<VectorAggregationDesc,String>(null, issue);
     }
 
@@ -4291,12 +4303,13 @@ public class Vectorizer implements PhysicalPlanResolver {
           // Try with DECIMAL_64 input and DECIMAL_64 output.
           final Class<? extends VectorAggregateExpression> vecAggrClass =
               findVecAggrClass(
-                  vecAggrClasses, aggregateName, inputColVectorType,
+                  vecAggrClasses, aggregationName, inputColVectorType,
                   ColumnVector.Type.DECIMAL_64, udafEvaluatorMode);
           if (vecAggrClass != null) {
             final VectorAggregationDesc vecAggrDesc =
                 new VectorAggregationDesc(
-                    aggrDesc, evaluator, inputTypeInfo, inputColVectorType, inputExpression,
+                    aggregationName, evaluator, udafEvaluatorMode,
+                    inputTypeInfo, inputColVectorType, inputExpression,
                     outputTypeInfo, ColumnVector.Type.DECIMAL_64, vecAggrClass);
             return new ImmutablePair<VectorAggregationDesc,String>(vecAggrDesc, null);
           }
@@ -4305,12 +4318,13 @@ public class Vectorizer implements PhysicalPlanResolver {
         // Try with regular DECIMAL output type.
         final Class<? extends VectorAggregateExpression> vecAggrClass =
             findVecAggrClass(
-                vecAggrClasses, aggregateName, inputColVectorType,
+                vecAggrClasses, aggregationName, inputColVectorType,
                 outputColVectorType, udafEvaluatorMode);
         if (vecAggrClass != null) {
           final VectorAggregationDesc vecAggrDesc =
               new VectorAggregationDesc(
-                  aggrDesc, evaluator, inputTypeInfo, inputColVectorType, inputExpression,
+                  aggregationName, evaluator, udafEvaluatorMode,
+                  inputTypeInfo, inputColVectorType, inputExpression,
                   outputTypeInfo, outputColVectorType, vecAggrClass);
           return new ImmutablePair<VectorAggregationDesc,String>(vecAggrDesc, null);
         }
@@ -4325,19 +4339,20 @@ public class Vectorizer implements PhysicalPlanResolver {
         // Try with with DECIMAL_64 input and desired output type.
         final Class<? extends VectorAggregateExpression> vecAggrClass =
             findVecAggrClass(
-                vecAggrClasses, aggregateName, inputColVectorType,
+                vecAggrClasses, aggregationName, inputColVectorType,
                 outputColVectorType, udafEvaluatorMode);
         if (vecAggrClass != null) {
           // for now, disable operating on decimal64 column vectors for semijoin reduction as
           // we have to make sure same decimal type should be used during bloom filter creation
           // and bloom filter probing
-          if (aggregateName.equals("bloom_filter")) {
+          if (aggregationName.equals("bloom_filter")) {
             inputExpression = vContext.wrapWithDecimal64ToDecimalConversion(inputExpression);
             inputColVectorType = ColumnVector.Type.DECIMAL;
           }
           final VectorAggregationDesc vecAggrDesc =
               new VectorAggregationDesc(
-                  aggrDesc, evaluator, inputTypeInfo, inputColVectorType, inputExpression,
+                  aggregationName, evaluator, udafEvaluatorMode,
+                  inputTypeInfo, inputColVectorType, inputExpression,
                   outputTypeInfo, outputColVectorType, vecAggrClass);
           return new ImmutablePair<VectorAggregationDesc,String>(vecAggrDesc, null);
         }
@@ -4355,19 +4370,20 @@ public class Vectorizer implements PhysicalPlanResolver {
      */
     Class<? extends VectorAggregateExpression> vecAggrClass =
         findVecAggrClass(
-            vecAggrClasses, aggregateName, inputColVectorType,
+            vecAggrClasses, aggregationName, inputColVectorType,
             outputColVectorType, udafEvaluatorMode);
     if (vecAggrClass != null) {
       final VectorAggregationDesc vecAggrDesc =
           new VectorAggregationDesc(
-              aggrDesc, evaluator, inputTypeInfo, inputColVectorType, inputExpression,
+              aggregationName, evaluator, udafEvaluatorMode,
+              inputTypeInfo, inputColVectorType, inputExpression,
               outputTypeInfo, outputColVectorType, vecAggrClass);
       return new ImmutablePair<VectorAggregationDesc,String>(vecAggrDesc, null);
     }
 
     // No match?
     String issue =
-        "Vector aggregation : \"" + aggregateName + "\" " +
+        "Vector aggregation : \"" + aggregationName + "\" " +
             "for input type: " +
                  (inputColVectorType == null ? "any" : "\"" + inputColVectorType) + "\" " +
             "and output type: \"" + outputColVectorType + "\" " +

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java
index d170d86..5cb7061 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.util.StringUtils;
 
@@ -250,6 +251,25 @@ public class GenericUDAFAverage extends AbstractGenericUDAFResolver {
     VectorUDAFAvgDecimalPartial2.class, VectorUDAFAvgDecimalFinal.class})
   public static class GenericUDAFAverageEvaluatorDecimal extends AbstractGenericUDAFAverageEvaluator<HiveDecimal> {
 
+    private int resultPrecision = -1;
+    private int resultScale = -1;
+
+    @Override
+    public ObjectInspector init(Mode m, ObjectInspector[] parameters)
+        throws HiveException {
+
+      // Intercept result ObjectInspector so we can extract the DECIMAL precision and scale.
+      ObjectInspector resultOI = super.init(m, parameters);
+      if (m == Mode.COMPLETE || m == Mode.FINAL) {
+        DecimalTypeInfo decimalTypeInfo =
+            (DecimalTypeInfo)
+                TypeInfoUtils.getTypeInfoFromObjectInspector(resultOI);
+        resultPrecision = decimalTypeInfo.getPrecision();
+        resultScale = decimalTypeInfo.getScale();
+      }
+      return resultOI;
+    }
+
     @Override
     public void doReset(AverageAggregationBuffer<HiveDecimal> aggregation) throws HiveException {
       aggregation.count = 0;
@@ -336,6 +356,7 @@ public class GenericUDAFAverage extends AbstractGenericUDAFResolver {
       } else {
         HiveDecimalWritable result = new HiveDecimalWritable(HiveDecimal.ZERO);
         result.set(aggregation.sum.divide(HiveDecimal.create(aggregation.count)));
+        result.mutateEnforcePrecisionScale(resultPrecision, resultScale);
         return result;
       }
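
The new init() override exists so that resultPrecision and resultScale reflect the declared decimal(p,s) of the avg() result, and the added mutateEnforcePrecisionScale call then rounds the raw quotient to that type instead of returning whatever scale the division happened to produce. A small standalone example of the enforcement call itself; the precision and scale values here are illustrative, not taken from the patch:

  import org.apache.hadoop.hive.common.type.HiveDecimal;
  import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

  public class EnforcePrecisionScaleDemo {
    public static void main(String[] args) {
      HiveDecimalWritable result =
          new HiveDecimalWritable(HiveDecimal.create("1.23456789"));
      // Pretend the declared result type is decimal(10,4): the writable is
      // mutated in place and rounded to 4 fractional digits.
      result.mutateEnforcePrecisionScale(10, 4);
      System.out.println(result); // 1.2346
    }
  }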
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
index c9fb3df..bb55d88 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
@@ -132,23 +132,29 @@ public class GenericUDAFVariance extends AbstractGenericUDAFResolver {
 
   /*
    * Calculate the variance family {VARIANCE, VARIANCE_SAMPLE, STANDARD_DEVIATION, or
-   * STANDARD_DEVIATION_STAMPLE) result when count > 1.  Public so vectorization code can
+   * STANDARD_DEVIATION_SAMPLE) result when count > 1.  Public so vectorization code can
    * use it, etc.
    */
   public static double calculateVarianceFamilyResult(double variance, long count,
       VarianceKind varianceKind) {
+    final double result;
     switch (varianceKind) {
     case VARIANCE:
-      return GenericUDAFVarianceEvaluator.calculateVarianceResult(variance, count);
+      result = GenericUDAFVarianceEvaluator.calculateVarianceResult(variance, count);
+      break;
     case VARIANCE_SAMPLE:
-      return GenericUDAFVarianceSampleEvaluator.calculateVarianceSampleResult(variance, count);
+      result = GenericUDAFVarianceSampleEvaluator.calculateVarianceSampleResult(variance, count);
+      break;
     case STANDARD_DEVIATION:
-      return GenericUDAFStdEvaluator.calculateStdResult(variance, count);
+      result = GenericUDAFStdEvaluator.calculateStdResult(variance, count);
+      break;
     case STANDARD_DEVIATION_SAMPLE:
-      return GenericUDAFStdSampleEvaluator.calculateStdSampleResult(variance, count);
+      result = GenericUDAFStdSampleEvaluator.calculateStdSampleResult(variance, count);
+      break;
     default:
       throw new RuntimeException("Unexpected variance kind " + varianceKind);
     }
+    return result;
   }
 
   @Override
@@ -381,7 +387,8 @@ public class GenericUDAFVariance extends AbstractGenericUDAFResolver {
      * Calculate the variance result when count > 1.  Public so vectorization code can use it, etc.
      */
     public static double calculateVarianceResult(double variance, long count) {
-      return variance / count;
+      final double result = variance / count;
+      return result;
     }
 
     @Override
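
Note on calculateVarianceFamilyResult above: all four variance-family results are derived from the same accumulated state, where `variance` is the running sum of squared deviations about the mean and `count` is the row count (the method is only used for count > 1). A minimal sketch, not part of the patch, using the standard formulas; the patch itself only shows the population-variance case (variance / count), and the numbers here are illustrative:

double variance = 42.0;   // illustrative sum of squared deviations about the mean
long count = 10L;
double varPop  = variance / count;                    // VARIANCE
double varSamp = variance / (count - 1);              // VARIANCE_SAMPLE
double stdPop  = Math.sqrt(variance / count);         // STANDARD_DEVIATION
double stdSamp = Math.sqrt(variance / (count - 1));   // STANDARD_DEVIATION_SAMPLE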

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
index ffdc410..fe1375b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorGroupByOperator.java
@@ -216,7 +216,10 @@ public class TestVectorGroupByOperator {
     vectorDesc.setVecAggrDescs(
         new VectorAggregationDesc[] {
           new VectorAggregationDesc(
-              agg, new GenericUDAFCount.GenericUDAFCountEvaluator(), null, ColumnVector.Type.NONE, null,
+              agg.getGenericUDAFName(),
+              new GenericUDAFCount.GenericUDAFCountEvaluator(),
+              agg.getMode(),
+              null, ColumnVector.Type.NONE, null,
               TypeInfoFactory.longTypeInfo, ColumnVector.Type.LONG, VectorUDAFCountStar.class)});
 
     vectorDesc.setProcessingMode(VectorGroupByDesc.ProcessingMode.HASH);
@@ -1555,7 +1558,7 @@ public class TestVectorGroupByOperator {
         "avg",
         2,
         Arrays.asList(new Long[]{}),
-        null);
+        0.0);
   }
 
   @Test
@@ -1564,12 +1567,12 @@ public class TestVectorGroupByOperator {
         "avg",
         2,
         Arrays.asList(new Long[]{null}),
-        null);
+        0.0);
     testAggregateLongAggregate(
         "avg",
         2,
         Arrays.asList(new Long[]{null, null, null}),
-        null);
+        0.0);
     testAggregateLongAggregate(
         "avg",
         2,
@@ -1601,7 +1604,7 @@ public class TestVectorGroupByOperator {
         null,
         4096,
         1024,
-        null);
+        0.0);
   }
 
   @SuppressWarnings("unchecked")
@@ -1632,7 +1635,7 @@ public class TestVectorGroupByOperator {
         "variance",
         2,
         Arrays.asList(new Long[]{}),
-        null);
+        0.0);
   }
 
   @Test
@@ -1650,12 +1653,12 @@ public class TestVectorGroupByOperator {
         "variance",
         2,
         Arrays.asList(new Long[]{null}),
-        null);
+        0.0);
     testAggregateLongAggregate(
         "variance",
         2,
         Arrays.asList(new Long[]{null, null, null}),
-        null);
+        0.0);
     testAggregateLongAggregate(
         "variance",
         2,
@@ -1680,7 +1683,7 @@ public class TestVectorGroupByOperator {
         null,
         4096,
         1024,
-        null);
+        0.0);
   }
 
   @Test
@@ -1708,7 +1711,7 @@ public class TestVectorGroupByOperator {
         "var_samp",
         2,
         Arrays.asList(new Long[]{}),
-        null);
+        0.0);
   }
 
 
@@ -1737,7 +1740,7 @@ public class TestVectorGroupByOperator {
         "std",
         2,
         Arrays.asList(new Long[]{}),
-        null);
+        0.0);
   }
 
 
@@ -1758,7 +1761,7 @@ public class TestVectorGroupByOperator {
         null,
         4096,
         1024,
-        null);
+        0.0);
   }
 
 
@@ -2236,14 +2239,21 @@ public class TestVectorGroupByOperator {
 
         assertEquals (true, vals[0] instanceof LongWritable);
         LongWritable lw = (LongWritable) vals[0];
-        assertFalse (lw.get() == 0L);
 
         if (vals[1] instanceof DoubleWritable) {
           DoubleWritable dw = (DoubleWritable) vals[1];
-          assertEquals (key, expected, dw.get() / lw.get());
+          if (lw.get() != 0L) {
+            assertEquals (key, expected, dw.get() / lw.get());
+          } else {
+            assertEquals(key, expected, 0.0);
+          }
         } else if (vals[1] instanceof HiveDecimalWritable) {
           HiveDecimalWritable hdw = (HiveDecimalWritable) vals[1];
-          assertEquals (key, expected, hdw.getHiveDecimal().divide(HiveDecimal.create(lw.get())));
+          if (lw.get() != 0L) {
+            assertEquals (key, expected, hdw.getHiveDecimal().divide(HiveDecimal.create(lw.get())));
+          } else {
+            assertEquals(key, expected, HiveDecimal.ZERO);
+          }
         }
       }
     }
@@ -2271,10 +2281,14 @@ public class TestVectorGroupByOperator {
         assertEquals (true, vals[1] instanceof DoubleWritable);
         assertEquals (true, vals[2] instanceof DoubleWritable);
         LongWritable cnt = (LongWritable) vals[0];
-        DoubleWritable sum = (DoubleWritable) vals[1];
-        DoubleWritable var = (DoubleWritable) vals[2];
-        assertTrue (1 <= cnt.get());
-        validateVariance (key, (Double) expected, cnt.get(), sum.get(), var.get());
+        if (cnt.get() == 0) {
+          assertEquals(key, expected, 0.0);
+        } else {
+          DoubleWritable sum = (DoubleWritable) vals[1];
+          DoubleWritable var = (DoubleWritable) vals[2];
+          assertTrue (1 <= cnt.get());
+          validateVariance (key, (Double) expected, cnt.get(), sum.get(), var.get());
+        }
       }
     }
   }
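
Note on the expected-value changes above (null -> 0.0): when every input for a key is NULL, the aggregation buffer appears to end up with count == 0, so the old unguarded sum/count check would divide by zero. A small arithmetic sketch, not part of the patch:

// Empty group: every input value for the key was NULL.
long count = 0L;
double sum = 0.0;
double unguarded = sum / count;                          // 0.0 / 0.0 == Double.NaN
double asserted  = (count != 0L) ? sum / count : 0.0;    // what the test now expects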

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomBatchSource.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomBatchSource.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomBatchSource.java
index 4c2f872..dd2f8e3 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomBatchSource.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomBatchSource.java
@@ -167,6 +167,8 @@ public class VectorRandomBatchSource {
       VectorRandomRowSource vectorRandomRowSource,
       Object[][] randomRows) {
 
+    final boolean allowNull = vectorRandomRowSource.getAllowNull();
+
     List<VectorBatchPattern> vectorBatchPatternList = new ArrayList<VectorBatchPattern>();
     final int rowCount = randomRows.length;
     int rowIndex = 0;
@@ -201,35 +203,38 @@ public class VectorRandomBatchSource {
        */
       while (true) {
 
-        // Repeated NULL permutations.
         long columnPermutation = 1;
-        while (true) {
-          if (columnPermutation > columnPermutationLimit) {
-            break;
-          }
-          final int maximumRowCount = Math.min(rowCount - rowIndex, VectorizedRowBatch.DEFAULT_SIZE);
-          if (maximumRowCount == 0) {
-            break;
-          }
-          int randomRowCount = 1 + random.nextInt(maximumRowCount);
-          final int rowLimit = rowIndex + randomRowCount;
+        if (allowNull) {
 
-          BitSet bitSet = BitSet.valueOf(new long[]{columnPermutation});
+          // Repeated NULL permutations.
+          while (true) {
+            if (columnPermutation > columnPermutationLimit) {
+              break;
+            }
+            final int maximumRowCount = Math.min(rowCount - rowIndex, VectorizedRowBatch.DEFAULT_SIZE);
+            if (maximumRowCount == 0) {
+              break;
+            }
+            int randomRowCount = 1 + random.nextInt(maximumRowCount);
+            final int rowLimit = rowIndex + randomRowCount;
 
-          for (int columnNum = bitSet.nextSetBit(0);
-               columnNum >= 0;
-               columnNum = bitSet.nextSetBit(columnNum + 1)) {
+            BitSet bitSet = BitSet.valueOf(new long[]{columnPermutation});
 
-            // Repeated NULL fill down column.
-            for (int r = rowIndex; r < rowLimit; r++) {
-              randomRows[r][columnNum] = null;
+            for (int columnNum = bitSet.nextSetBit(0);
+                 columnNum >= 0;
+                 columnNum = bitSet.nextSetBit(columnNum + 1)) {
+
+              // Repeated NULL fill down column.
+              for (int r = rowIndex; r < rowLimit; r++) {
+                randomRows[r][columnNum] = null;
+              }
             }
+            vectorBatchPatternList.add(
+                VectorBatchPattern.createRepeatedBatch(
+                    random, randomRowCount, bitSet, asSelected));
+            columnPermutation++;
+            rowIndex = rowLimit;
           }
-          vectorBatchPatternList.add(
-              VectorBatchPattern.createRepeatedBatch(
-                  random, randomRowCount, bitSet, asSelected));
-          columnPermutation++;
-          rowIndex = rowLimit;
         }
 
         // Repeated non-NULL permutations.
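
Note on the allowNull gate above: the repeated-NULL batches are generated by walking a bitmask over the columns; each value of columnPermutation is turned into a BitSet whose set bits are the columns to fill with NULLs for one batch. A minimal sketch of that enumeration, not part of the patch; the column count and permutation limit are illustrative (java.util.BitSet):

int columnCount = 3;
long columnPermutationLimit = (1L << columnCount) - 1;   // illustrative: permutations 1..7
for (long columnPermutation = 1; columnPermutation <= columnPermutationLimit; columnPermutation++) {
  BitSet bitSet = BitSet.valueOf(new long[]{columnPermutation});
  for (int columnNum = bitSet.nextSetBit(0);
       columnNum >= 0;
       columnNum = bitSet.nextSetBit(columnNum + 1)) {
    System.out.println("permutation " + columnPermutation + " -> NULL-fill column " + columnNum);
  }
}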

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomRowSource.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomRowSource.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomRowSource.java
index 6181ae8..a1cefaa 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomRowSource.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/VectorRandomRowSource.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hive.ql.exec.vector;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.text.ParseException;
-
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -29,7 +28,6 @@ import java.util.Random;
 import java.util.Set;
 
 import org.apache.commons.lang.StringUtils;
-
 import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
 import org.apache.hadoop.hive.common.type.Date;
 import org.apache.hadoop.hive.common.type.HiveChar;
@@ -86,6 +84,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
 import org.apache.hive.common.util.DateUtils;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 
@@ -130,6 +129,10 @@ public class VectorRandomRowSource {
   private boolean addEscapables;
   private String needsEscapeStr;
 
+  public boolean getAllowNull() {
+    return allowNull;
+  }
+
   public static class StringGenerationOption {
 
     private boolean generateSentences;
@@ -1021,43 +1024,141 @@ public class VectorRandomRowSource {
 
     switch (primitiveTypeInfo.getPrimitiveCategory()) {
     case BOOLEAN:
-      return ((WritableBooleanObjectInspector) objectInspector).create((boolean) object);
+      {
+        WritableBooleanObjectInspector writableOI = (WritableBooleanObjectInspector) objectInspector;
+        if (object instanceof Boolean) {
+          return writableOI.create((boolean) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case BYTE:
-      return ((WritableByteObjectInspector) objectInspector).create((byte) object);
+      {
+        WritableByteObjectInspector writableOI = (WritableByteObjectInspector) objectInspector;
+        if (object instanceof Byte) {
+          return writableOI.create((byte) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case SHORT:
-      return ((WritableShortObjectInspector) objectInspector).create((short) object);
+      {
+        WritableShortObjectInspector writableOI = (WritableShortObjectInspector) objectInspector;
+        if (object instanceof Short) {
+          return writableOI.create((short) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case INT:
-      return ((WritableIntObjectInspector) objectInspector).create((int) object);
+      {
+        WritableIntObjectInspector writableOI = (WritableIntObjectInspector) objectInspector;
+        if (object instanceof Integer) {
+          return writableOI.create((int) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case LONG:
-      return ((WritableLongObjectInspector) objectInspector).create((long) object);
+      {
+        WritableLongObjectInspector writableOI = (WritableLongObjectInspector) objectInspector;
+        if (object instanceof Long) {
+          return writableOI.create((long) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case DATE:
-      return ((WritableDateObjectInspector) objectInspector).create((Date) object);
+      {
+        WritableDateObjectInspector writableOI = (WritableDateObjectInspector) objectInspector;
+        if (object instanceof Date) {
+          return writableOI.create((Date) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case FLOAT:
-      return ((WritableFloatObjectInspector) objectInspector).create((float) object);
+      {
+        WritableFloatObjectInspector writableOI = (WritableFloatObjectInspector) objectInspector;
+        if (object instanceof Float) {
+          return writableOI.create((float) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case DOUBLE:
-      return ((WritableDoubleObjectInspector) objectInspector).create((double) object);
+      {
+        WritableDoubleObjectInspector writableOI = (WritableDoubleObjectInspector) objectInspector;
+        if (object instanceof Double) {
+          return writableOI.create((double) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case STRING:
-      return ((WritableStringObjectInspector) objectInspector).create((String) object);
+      {
+        WritableStringObjectInspector writableOI = (WritableStringObjectInspector) objectInspector;
+        if (object instanceof String) {
+          return writableOI.create((String) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case CHAR:
       {
         WritableHiveCharObjectInspector writableCharObjectInspector =
             new WritableHiveCharObjectInspector( (CharTypeInfo) primitiveTypeInfo);
-        return writableCharObjectInspector.create((HiveChar) object);
+        if (object instanceof HiveChar) {
+          return writableCharObjectInspector.create((HiveChar) object);
+        } else {
+          return writableCharObjectInspector.copyObject(object);
+        }
       }
     case VARCHAR:
       {
         WritableHiveVarcharObjectInspector writableVarcharObjectInspector =
             new WritableHiveVarcharObjectInspector( (VarcharTypeInfo) primitiveTypeInfo);
-        return writableVarcharObjectInspector.create((HiveVarchar) object);
+        if (object instanceof HiveVarchar) {
+          return writableVarcharObjectInspector.create((HiveVarchar) object);
+        } else {
+          return writableVarcharObjectInspector.copyObject(object);
+        }
       }
     case BINARY:
-      return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector.create((byte[]) object);
+      {
+        if (object instanceof byte[]) {
+          return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector.create((byte[]) object);
+        } else {
+          return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector.copyObject(object);
+        }
+      }
     case TIMESTAMP:
-      return ((WritableTimestampObjectInspector) objectInspector).create((Timestamp) object);
+    {
+      WritableTimestampObjectInspector writableOI = (WritableTimestampObjectInspector) objectInspector;
+      if (object instanceof Timestamp) {
+        return writableOI.create((Timestamp) object);
+      } else {
+        return writableOI.copyObject(object);
+      }
+    }
     case INTERVAL_YEAR_MONTH:
-      return ((WritableHiveIntervalYearMonthObjectInspector) objectInspector).create((HiveIntervalYearMonth) object);
+      {
+        WritableHiveIntervalYearMonthObjectInspector writableOI = (WritableHiveIntervalYearMonthObjectInspector) objectInspector;
+        if (object instanceof HiveIntervalYearMonth) {
+          return writableOI.create((HiveIntervalYearMonth) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case INTERVAL_DAY_TIME:
-      return ((WritableHiveIntervalDayTimeObjectInspector) objectInspector).create((HiveIntervalDayTime) object);
+      {
+        WritableHiveIntervalDayTimeObjectInspector writableOI = (WritableHiveIntervalDayTimeObjectInspector) objectInspector;
+        if (object instanceof HiveIntervalDayTime) {
+          return writableOI.create((HiveIntervalDayTime) object);
+        } else {
+          return writableOI.copyObject(object);
+        }
+      }
     case DECIMAL:
       {
         if (dataTypePhysicalVariation == dataTypePhysicalVariation.DECIMAL_64) {
@@ -1071,9 +1172,13 @@ public class VectorRandomRowSource {
           }
           return ((WritableLongObjectInspector) objectInspector).create(value);
         } else {
-          WritableHiveDecimalObjectInspector writableDecimalObjectInspector =
+          WritableHiveDecimalObjectInspector writableOI =
               new WritableHiveDecimalObjectInspector((DecimalTypeInfo) primitiveTypeInfo);
-          return writableDecimalObjectInspector.create((HiveDecimal) object);
+          if (object instanceof HiveDecimal) {
+            return writableOI.create((HiveDecimal) object);
+          } else {
+            return writableOI.copyObject(object);
+          }
         }
       }
     default:
@@ -1081,6 +1186,116 @@ public class VectorRandomRowSource {
     }
   }
 
+  public static Object getNonWritablePrimitiveObject(Object object, TypeInfo typeInfo,
+      ObjectInspector objectInspector) {
+
+    PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) typeInfo;
+    switch (primitiveTypeInfo.getPrimitiveCategory()) {
+    case BOOLEAN:
+      if (object instanceof Boolean) {
+        return object;
+      } else {
+        return ((WritableBooleanObjectInspector) objectInspector).get(object);
+      }
+    case BYTE:
+      if (object instanceof Byte) {
+        return object;
+      } else {
+        return ((WritableByteObjectInspector) objectInspector).get(object);
+      }
+    case SHORT:
+      if (object instanceof Short) {
+        return object;
+      } else {
+        return ((WritableShortObjectInspector) objectInspector).get(object);
+      }
+    case INT:
+      if (object instanceof Integer) {
+        return object;
+      } else {
+        return ((WritableIntObjectInspector) objectInspector).get(object);
+      }
+    case LONG:
+      if (object instanceof Long) {
+        return object;
+      } else {
+        return ((WritableLongObjectInspector) objectInspector).get(object);
+      }
+    case FLOAT:
+      if (object instanceof Float) {
+        return object;
+      } else {
+        return ((WritableFloatObjectInspector) objectInspector).get(object);
+      }
+    case DOUBLE:
+      if (object instanceof Double) {
+        return object;
+      } else {
+        return ((WritableDoubleObjectInspector) objectInspector).get(object);
+      }
+    case STRING:
+      if (object instanceof String) {
+        return object;
+      } else {
+        return ((WritableStringObjectInspector) objectInspector).getPrimitiveJavaObject(object);
+      }
+    case DATE:
+      if (object instanceof Date) {
+        return object;
+      } else {
+        return ((WritableDateObjectInspector) objectInspector).getPrimitiveJavaObject(object);
+      }
+    case TIMESTAMP:
+      if (object instanceof Timestamp) {
+        return object;
+      } else if (object instanceof org.apache.hadoop.hive.common.type.Timestamp) {
+        return object;
+      } else {
+        return ((WritableTimestampObjectInspector) objectInspector).getPrimitiveJavaObject(object);
+      }
+    case DECIMAL:
+      if (object instanceof HiveDecimal) {
+        return object;
+      } else {
+        WritableHiveDecimalObjectInspector writableDecimalObjectInspector =
+            new WritableHiveDecimalObjectInspector((DecimalTypeInfo) primitiveTypeInfo);
+        return writableDecimalObjectInspector.getPrimitiveJavaObject(object);
+      }
+    case VARCHAR:
+      if (object instanceof HiveVarchar) {
+        return object;
+      } else {
+        WritableHiveVarcharObjectInspector writableVarcharObjectInspector =
+            new WritableHiveVarcharObjectInspector( (VarcharTypeInfo) primitiveTypeInfo);
+        return writableVarcharObjectInspector.getPrimitiveJavaObject(object);
+      }
+    case CHAR:
+      if (object instanceof HiveChar) {
+        return object;
+      } else {
+        WritableHiveCharObjectInspector writableCharObjectInspector =
+            new WritableHiveCharObjectInspector( (CharTypeInfo) primitiveTypeInfo);
+        return writableCharObjectInspector.getPrimitiveJavaObject(object);
+      }
+    case INTERVAL_YEAR_MONTH:
+      if (object instanceof HiveIntervalYearMonth) {
+        return object;
+      } else {
+        return ((WritableHiveIntervalYearMonthObjectInspector) objectInspector).getPrimitiveJavaObject(object);
+      }
+    case INTERVAL_DAY_TIME:
+      if (object instanceof HiveIntervalDayTime) {
+        return object;
+      } else {
+        return ((WritableHiveIntervalDayTimeObjectInspector) objectInspector).getPrimitiveJavaObject(object);
+      }
+    case BINARY:
+    default:
+      throw new RuntimeException(
+          "Unexpected primitive category " + primitiveTypeInfo.getPrimitiveCategory());
+    }
+  }
+
   public Object randomWritable(int column) {
     return randomWritable(
         typeInfos[column], objectInspectorList.get(column), dataTypePhysicalVariations[column],

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/AggregationBase.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/AggregationBase.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/AggregationBase.java
new file mode 100644
index 0000000..583241c
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/AggregationBase.java
@@ -0,0 +1,473 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.aggregation;
+
+import java.lang.reflect.Constructor;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationBufferRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorAggregationDesc;
+import org.apache.hadoop.hive.ql.exec.vector.VectorExtractRow;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizationContext;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
+import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.optimizer.physical.Vectorizer;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+
+import junit.framework.Assert;
+
+public class AggregationBase {
+
+  public enum AggregationTestMode {
+    ROW_MODE,
+    VECTOR_EXPRESSION;
+
+    static final int count = values().length;
+  }
+
+  public static GenericUDAFEvaluator getEvaluator(String aggregationFunctionName,
+      TypeInfo typeInfo)
+      throws SemanticException {
+
+    GenericUDAFResolver resolver =
+        FunctionRegistry.getGenericUDAFResolver(aggregationFunctionName);
+    TypeInfo[] parameters = new TypeInfo[] { typeInfo };
+    GenericUDAFEvaluator evaluator = resolver.getEvaluator(parameters);
+    return evaluator;
+  }
+
+  protected static boolean doRowTest(TypeInfo typeInfo,
+      GenericUDAFEvaluator evaluator, TypeInfo outputTypeInfo,
+      GenericUDAFEvaluator.Mode udafEvaluatorMode, int maxKeyCount,
+      List<String> columns, List<ExprNodeDesc> children,
+      Object[][] randomRows, ObjectInspector rowInspector,
+      Object[] results)
+          throws Exception {
+
+    /*
+    System.out.println(
+        "*DEBUG* typeInfo " + typeInfo.toString() +
+        " aggregationTestMode ROW_MODE" +
+        " outputTypeInfo " + outputTypeInfo.toString());
+    */
+
+    // Last entry is for a NULL key.
+    AggregationBuffer[] aggregationBuffers = new AggregationBuffer[maxKeyCount + 1];
+
+    ObjectInspector objectInspector = TypeInfoUtils
+        .getStandardWritableObjectInspectorFromTypeInfo(outputTypeInfo);
+
+    Object[] parameterArray = new Object[1];
+    final int rowCount = randomRows.length;
+    for (int i = 0; i < rowCount; i++) {
+      Object[] row = randomRows[i];
+      ShortWritable shortWritable = (ShortWritable) row[0];
+
+      final int key;
+      if (shortWritable == null) {
+        key = maxKeyCount;
+      } else {
+        key = shortWritable.get();
+      }
+      AggregationBuffer aggregationBuffer = aggregationBuffers[key];
+      if (aggregationBuffer == null) {
+        aggregationBuffer = evaluator.getNewAggregationBuffer();
+        aggregationBuffers[key] = aggregationBuffer;
+      }
+      parameterArray[0] = row[1];
+      evaluator.aggregate(aggregationBuffer, parameterArray);
+    }
+
+    final boolean isPrimitive = (outputTypeInfo instanceof PrimitiveTypeInfo);
+    final boolean isPartial =
+        (udafEvaluatorMode == GenericUDAFEvaluator.Mode.PARTIAL1 ||
+         udafEvaluatorMode == GenericUDAFEvaluator.Mode.PARTIAL2);
+
+    for (short key = 0; key < maxKeyCount + 1; key++) {
+      AggregationBuffer aggregationBuffer = aggregationBuffers[key];
+      if (aggregationBuffer != null) {
+        final Object result;
+        if (isPartial) {
+          result = evaluator.terminatePartial(aggregationBuffer);
+        } else {
+          result = evaluator.terminate(aggregationBuffer);
+        }
+        Object copyResult;
+        if (result == null) {
+          copyResult = null;
+        } else if (isPrimitive) {
+          copyResult =
+              VectorRandomRowSource.getWritablePrimitiveObject(
+                  (PrimitiveTypeInfo) outputTypeInfo, objectInspector, result);
+        } else {
+          copyResult =
+              ObjectInspectorUtils.copyToStandardObject(
+                  result, objectInspector, ObjectInspectorCopyOption.WRITABLE);
+        }
+        results[key] = copyResult;
+      }
+    }
+
+    return true;
+  }
+
+  private static void extractResultObjects(VectorizedRowBatch outputBatch, short[] keys,
+      VectorExtractRow resultVectorExtractRow, TypeInfo outputTypeInfo, Object[] scrqtchRow,
+      Object[] results) {
+
+    final boolean isPrimitive = (outputTypeInfo instanceof PrimitiveTypeInfo);
+    ObjectInspector objectInspector;
+    if (isPrimitive) {
+      objectInspector = TypeInfoUtils
+          .getStandardWritableObjectInspectorFromTypeInfo(outputTypeInfo);
+    } else {
+      objectInspector = null;
+    }
+
+    for (int batchIndex = 0; batchIndex < outputBatch.size; batchIndex++) {
+      resultVectorExtractRow.extractRow(outputBatch, batchIndex, scrqtchRow);
+      if (isPrimitive) {
+        Object copyResult =
+            ObjectInspectorUtils.copyToStandardObject(
+                scrqtchRow[0], objectInspector, ObjectInspectorCopyOption.WRITABLE);
+        results[keys[batchIndex]] = copyResult;
+      } else {
+        results[keys[batchIndex]] = scrqtchRow[0];
+      }
+    }
+  }
+
+  protected static boolean doVectorTest(String aggregationName, TypeInfo typeInfo,
+      GenericUDAFEvaluator evaluator, TypeInfo outputTypeInfo,
+      GenericUDAFEvaluator.Mode udafEvaluatorMode, int maxKeyCount,
+      List<String> columns, String[] columnNames,
+      TypeInfo[] typeInfos, DataTypePhysicalVariation[] dataTypePhysicalVariations,
+      List<ExprNodeDesc> parameterList,
+      VectorRandomBatchSource batchSource,
+      Object[] results)
+          throws Exception {
+
+    HiveConf hiveConf = new HiveConf();
+
+    VectorizationContext vectorizationContext =
+        new VectorizationContext(
+            "name",
+            columns,
+            Arrays.asList(typeInfos),
+            Arrays.asList(dataTypePhysicalVariations),
+            hiveConf);
+
+    ImmutablePair<VectorAggregationDesc,String> pair =
+        Vectorizer.getVectorAggregationDesc(
+            aggregationName,
+            parameterList,
+            evaluator,
+            outputTypeInfo,
+            udafEvaluatorMode,
+            vectorizationContext);
+    VectorAggregationDesc vecAggrDesc = pair.left;
+    if (vecAggrDesc == null) {
+      Assert.fail(
+          "No vector aggregation expression found for aggregationName " + aggregationName +
+          " udafEvaluatorMode " + udafEvaluatorMode +
+          " parameterList " + parameterList +
+          " outputTypeInfo " + outputTypeInfo);
+    }
+
+    Class<? extends VectorAggregateExpression> vecAggrClass = vecAggrDesc.getVecAggrClass();
+
+    Constructor<? extends VectorAggregateExpression> ctor = null;
+    try {
+      ctor = vecAggrClass.getConstructor(VectorAggregationDesc.class);
+    } catch (Exception e) {
+      throw new HiveException("Constructor " + vecAggrClass.getSimpleName() +
+          "(VectorAggregationDesc) not available");
+    }
+    VectorAggregateExpression vecAggrExpr = null;
+    try {
+      vecAggrExpr = ctor.newInstance(vecAggrDesc);
+    } catch (Exception e) {
+
+       throw new HiveException("Failed to create " + vecAggrClass.getSimpleName() +
+           "(VectorAggregationDesc) object ", e);
+    }
+    VectorExpression.doTransientInit(vecAggrExpr.getInputExpression());
+
+    /*
+    System.out.println(
+        "*DEBUG* typeInfo " + typeInfo.toString() +
+        " aggregationTestMode VECTOR_MODE" +
+        " vecAggrExpr " + vecAggrExpr.getClass().getSimpleName());
+    */
+
+    VectorRandomRowSource rowSource = batchSource.getRowSource();
+    VectorizedRowBatchCtx batchContext =
+        new VectorizedRowBatchCtx(
+            columnNames,
+            rowSource.typeInfos(),
+            rowSource.dataTypePhysicalVariations(),
+            /* dataColumnNums */ null,
+            /* partitionColumnCount */ 0,
+            /* virtualColumnCount */ 0,
+            /* neededVirtualColumns */ null,
+            vectorizationContext.getScratchColumnTypeNames(),
+            vectorizationContext.getScratchDataTypePhysicalVariations());
+
+    VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
+
+    // Last entry is for a NULL key.
+    VectorAggregationBufferRow[] vectorAggregationBufferRows =
+        new VectorAggregationBufferRow[maxKeyCount + 1];
+
+    VectorAggregationBufferRow[] batchBufferRows;
+
+    batchSource.resetBatchIteration();
+    int rowIndex = 0;
+    while (true) {
+      if (!batchSource.fillNextBatch(batch)) {
+        break;
+      }
+      LongColumnVector keyLongColVector = (LongColumnVector) batch.cols[0];
+
+      batchBufferRows =
+          new VectorAggregationBufferRow[VectorizedRowBatch.DEFAULT_SIZE];
+
+      final int size = batch.size;
+      boolean selectedInUse = batch.selectedInUse;
+      int[] selected = batch.selected;
+      for (int logical = 0; logical < size; logical++) {
+        final int batchIndex = (selectedInUse ? selected[logical] : logical);
+        final int keyAdjustedBatchIndex;
+        if (keyLongColVector.isRepeating) {
+          keyAdjustedBatchIndex = 0;
+        } else {
+          keyAdjustedBatchIndex = batchIndex;
+        }
+        final short key;
+        if (keyLongColVector.noNulls || !keyLongColVector.isNull[keyAdjustedBatchIndex]) {
+          key = (short) keyLongColVector.vector[keyAdjustedBatchIndex];
+        } else {
+          key = (short) maxKeyCount;
+        }
+
+        VectorAggregationBufferRow bufferRow = vectorAggregationBufferRows[key];
+        if (bufferRow == null) {
+          VectorAggregateExpression.AggregationBuffer aggregationBuffer =
+              vecAggrExpr.getNewAggregationBuffer();
+          aggregationBuffer.reset();
+          VectorAggregateExpression.AggregationBuffer[] aggregationBuffers =
+              new VectorAggregateExpression.AggregationBuffer[] { aggregationBuffer };
+          bufferRow = new VectorAggregationBufferRow(aggregationBuffers);
+          vectorAggregationBufferRows[key] = bufferRow;
+        }
+        batchBufferRows[logical] = bufferRow;
+      }
+
+      vecAggrExpr.aggregateInputSelection(
+          batchBufferRows,
+          0,
+          batch);
+
+      rowIndex += batch.size;
+    }
+
+    String[] outputColumnNames = new String[] { "output" };
+
+    TypeInfo[] outputTypeInfos = new TypeInfo[] { outputTypeInfo };
+    VectorizedRowBatchCtx outputBatchContext =
+        new VectorizedRowBatchCtx(
+            outputColumnNames,
+            outputTypeInfos,
+            null,
+            /* dataColumnNums */ null,
+            /* partitionColumnCount */ 0,
+            /* virtualColumnCount */ 0,
+            /* neededVirtualColumns */ null,
+            new String[0],
+            new DataTypePhysicalVariation[0]);
+
+    VectorizedRowBatch outputBatch = outputBatchContext.createVectorizedRowBatch();
+
+    short[] keys = new short[VectorizedRowBatch.DEFAULT_SIZE];
+
+    VectorExtractRow resultVectorExtractRow = new VectorExtractRow();
+    resultVectorExtractRow.init(
+        new TypeInfo[] { outputTypeInfo }, new int[] { 0 });
+    Object[] scrqtchRow = new Object[1];
+
+    for (short key = 0; key < maxKeyCount + 1; key++) {
+      VectorAggregationBufferRow vectorAggregationBufferRow = vectorAggregationBufferRows[key];
+      if (vectorAggregationBufferRow != null) {
+        if (outputBatch.size == VectorizedRowBatch.DEFAULT_SIZE) {
+          extractResultObjects(outputBatch, keys, resultVectorExtractRow, outputTypeInfo,
+              scrqtchRow, results);
+          outputBatch.reset();
+        }
+        keys[outputBatch.size] = key;
+        VectorAggregateExpression.AggregationBuffer aggregationBuffer =
+            vectorAggregationBufferRow.getAggregationBuffer(0);
+        vecAggrExpr.assignRowColumn(outputBatch, outputBatch.size++, 0, aggregationBuffer);
+      }
+    }
+    if (outputBatch.size > 0) {
+      extractResultObjects(outputBatch, keys, resultVectorExtractRow, outputTypeInfo,
+          scrqtchRow, results);
+    }
+
+    return true;
+  }
+
+  private boolean compareObjects(Object object1, Object object2, TypeInfo typeInfo,
+      ObjectInspector objectInspector) {
+    if (typeInfo instanceof PrimitiveTypeInfo) {
+      return
+          VectorRandomRowSource.getWritablePrimitiveObject(
+              (PrimitiveTypeInfo) typeInfo, objectInspector, object1).equals(
+                  VectorRandomRowSource.getWritablePrimitiveObject(
+                      (PrimitiveTypeInfo) typeInfo, objectInspector, object2));
+    } else {
+      return object1.equals(object2);
+    }
+  }
+
+  protected void executeAggregationTests(String aggregationName, TypeInfo typeInfo,
+      GenericUDAFEvaluator evaluator,
+      TypeInfo outputTypeInfo, GenericUDAFEvaluator.Mode udafEvaluatorMode,
+      int maxKeyCount, List<String> columns, String[] columnNames,
+      List<ExprNodeDesc> parameters, Object[][] randomRows,
+      VectorRandomRowSource rowSource, VectorRandomBatchSource batchSource,
+      Object[] resultsArray)
+          throws Exception {
+
+    for (int i = 0; i < AggregationTestMode.count; i++) {
+
+      // Last entry is for a NULL key.
+      Object[] results = new Object[maxKeyCount + 1];
+      resultsArray[i] = results;
+
+      AggregationTestMode aggregationTestMode = AggregationTestMode.values()[i];
+      switch (aggregationTestMode) {
+      case ROW_MODE:
+        if (!doRowTest(
+              typeInfo,
+              evaluator,
+              outputTypeInfo,
+              udafEvaluatorMode,
+              maxKeyCount,
+              columns,
+              parameters,
+              randomRows,
+              rowSource.rowStructObjectInspector(),
+              results)) {
+          return;
+        }
+        break;
+      case VECTOR_EXPRESSION:
+        if (!doVectorTest(
+              aggregationName,
+              typeInfo,
+              evaluator,
+              outputTypeInfo,
+              udafEvaluatorMode,
+              maxKeyCount,
+              columns,
+              columnNames,
+              rowSource.typeInfos(),
+              rowSource.dataTypePhysicalVariations(),
+              parameters,
+              batchSource,
+              results)) {
+          return;
+        }
+        break;
+      default:
+        throw new RuntimeException(
+            "Unexpected Hash Aggregation test mode " + aggregationTestMode);
+      }
+    }
+  }
+
+  protected void verifyAggregationResults(TypeInfo typeInfo, TypeInfo outputTypeInfo,
+      int maxKeyCount, GenericUDAFEvaluator.Mode udafEvaluatorMode,
+      Object[] resultsArray) {
+
+    // Row-mode is the expected results.
+    Object[] expectedResults = (Object[]) resultsArray[0];
+
+    ObjectInspector objectInspector = TypeInfoUtils
+        .getStandardWritableObjectInspectorFromTypeInfo(outputTypeInfo);
+
+    for (int v = 1; v < AggregationTestMode.count; v++) {
+      Object[] vectorResults = (Object[]) resultsArray[v];
+
+      for (short key = 0; key < maxKeyCount + 1; key++) {
+        Object expectedResult = expectedResults[key];
+        Object vectorResult = vectorResults[key];
+        if (expectedResult == null || vectorResult == null) {
+          if (expectedResult != null || vectorResult != null) {
+            Assert.fail(
+                "Key " + key +
+                " typeName " + typeInfo.getTypeName() +
+                " outputTypeName " + outputTypeInfo.getTypeName() +
+                " " + AggregationTestMode.values()[v] +
+                " result is NULL " + (vectorResult == null ? "YES" : "NO result " + vectorResult.toString()) +
+                " does not match row-mode expected result is NULL " +
+                (expectedResult == null ? "YES" : "NO result " + expectedResult.toString()) +
+                " udafEvaluatorMode " + udafEvaluatorMode);
+          }
+        } else {
+          if (!compareObjects(expectedResult, vectorResult, outputTypeInfo, objectInspector)) {
+            Assert.fail(
+                "Key " + key +
+              " typeName " + typeInfo.getTypeName() +
+              " outputTypeName " + outputTypeInfo.getTypeName() +
+                " " + AggregationTestMode.values()[v] +
+                " result " + vectorResult.toString() +
+                " (" + vectorResult.getClass().getSimpleName() + ")" +
+                " does not match row-mode expected result " + expectedResult.toString() +
+                " (" + expectedResult.getClass().getSimpleName() + ")" +
+                " udafEvaluatorMode " + udafEvaluatorMode);
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
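
Note on the new AggregationBase harness above: both test modes exercise the same GenericUDAFEvaluator lifecycle, row mode directly and vector mode through the VectorAggregateExpression built from it. A minimal sketch of that lifecycle, not part of the patch; the "count"/short-key setup is illustrative, and the calls are the ones the harness itself uses:

// Illustrative single-key run of the row-mode path.
GenericUDAFEvaluator evaluator = getEvaluator("count", TypeInfoFactory.shortTypeInfo);
ObjectInspector[] parameterObjectInspectors = new ObjectInspector[] {
    TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(TypeInfoFactory.shortTypeInfo) };
evaluator.init(GenericUDAFEvaluator.Mode.PARTIAL1, parameterObjectInspectors);
AggregationBuffer buffer = evaluator.getNewAggregationBuffer();
evaluator.aggregate(buffer, new Object[] { new ShortWritable((short) 7) });
Object result = evaluator.terminatePartial(buffer);   // terminate(buffer) in FINAL/COMPLETE mode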

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/TestVectorAggregation.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/TestVectorAggregation.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/TestVectorAggregation.java
new file mode 100644
index 0000000..c5f0483
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/aggregation/TestVectorAggregation.java
@@ -0,0 +1,664 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.exec.vector.aggregation;
+
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.sql.Timestamp;
+
+import org.apache.hadoop.hive.common.type.DataTypePhysicalVariation;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomBatchSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource;
+import org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource.GenerationSpec;
+import org.apache.hadoop.hive.ql.exec.vector.expressions.aggregates.VectorAggregateExpression;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFVariance;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableShortObjectInspector;
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+import org.apache.hadoop.hive.serde2.io.ShortWritable;
+
+import junit.framework.Assert;
+
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class TestVectorAggregation extends AggregationBase {
+
+  @Test
+  public void testAvgIntegers() throws Exception {
+    Random random = new Random(7743);
+
+    doIntegerTests("avg", random);
+  }
+
+  @Test
+  public void testAvgFloating() throws Exception {
+    Random random = new Random(7743);
+
+    doFloatingTests("avg", random);
+  }
+
+  @Test
+  public void testAvgDecimal() throws Exception {
+    Random random = new Random(7743);
+
+    doDecimalTests("avg", random);
+  }
+
+  @Test
+  public void testAvgTimestamp() throws Exception {
+    Random random = new Random(7743);
+
+    doTests(
+        random, "avg", TypeInfoFactory.timestampTypeInfo);
+  }
+
+  @Test
+  public void testCount() throws Exception {
+    Random random = new Random(7743);
+
+    doTests(
+        random, "count", TypeInfoFactory.shortTypeInfo);
+    doTests(
+        random, "count", TypeInfoFactory.longTypeInfo);
+    doTests(
+        random, "count", TypeInfoFactory.doubleTypeInfo);
+    doTests(
+        random, "count", new DecimalTypeInfo(18, 10));
+    doTests(
+        random, "count", TypeInfoFactory.stringTypeInfo);
+  }
+
+  @Test
+  public void testMax() throws Exception {
+    Random random = new Random(7743);
+
+    doIntegerTests("max", random);
+    doFloatingTests("max", random);
+    doDecimalTests("max", random);
+
+    doTests(
+        random, "max", TypeInfoFactory.timestampTypeInfo);
+    doTests(
+        random, "max", TypeInfoFactory.intervalDayTimeTypeInfo);
+
+    doStringFamilyTests("max", random);
+  }
+
+  @Test
+  public void testMin() throws Exception {
+    Random random = new Random(7743);
+
+    doIntegerTests("min", random);
+    doFloatingTests("min", random);
+    doDecimalTests("min", random);
+
+    doTests(
+        random, "min", TypeInfoFactory.timestampTypeInfo);
+    doTests(
+        random, "min", TypeInfoFactory.intervalDayTimeTypeInfo);
+
+    doStringFamilyTests("min", random);
+  }
+
+  @Test
+  public void testSum() throws Exception {
+    Random random = new Random(7743);
+
+    doTests(
+        random, "sum", TypeInfoFactory.shortTypeInfo);
+    doTests(
+        random, "sum", TypeInfoFactory.longTypeInfo);
+    doTests(
+        random, "sum", TypeInfoFactory.doubleTypeInfo);
+
+    doDecimalTests("sum", random);
+  }
+
+  private final static Set<String> varianceNames =
+      GenericUDAFVariance.VarianceKind.nameMap.keySet();
+
+  @Test
+  public void testVarianceIntegers() throws Exception {
+    Random random = new Random(7743);
+
+    for (String aggregationName : varianceNames) {
+      doIntegerTests(aggregationName, random);
+    }
+  }
+
+  @Test
+  public void testVarianceFloating() throws Exception {
+    Random random = new Random(7743);
+
+    for (String aggregationName : varianceNames) {
+      doFloatingTests(aggregationName, random);
+    }
+  }
+
+  @Test
+  public void testVarianceDecimal() throws Exception {
+    Random random = new Random(7743);
+
+    for (String aggregationName : varianceNames) {
+      doDecimalTests(aggregationName, random);
+    }
+  }
+
+  private static TypeInfo[] integerTypeInfos = new TypeInfo[] {
+    TypeInfoFactory.byteTypeInfo,
+    TypeInfoFactory.shortTypeInfo,
+    TypeInfoFactory.intTypeInfo,
+    TypeInfoFactory.longTypeInfo
+  };
+
+  // We have test failures with FLOAT.  Ignoring this issue for now.
+  private static TypeInfo[] floatingTypeInfos = new TypeInfo[] {
+    // TypeInfoFactory.floatTypeInfo,
+    TypeInfoFactory.doubleTypeInfo
+  };
+
+  private void doIntegerTests(String aggregationName, Random random)
+          throws Exception {
+    for (TypeInfo typeInfo : integerTypeInfos) {
+      doTests(
+          random, aggregationName, typeInfo);
+    }
+  }
+
+  private void doFloatingTests(String aggregationName, Random random)
+      throws Exception {
+    for (TypeInfo typeInfo : floatingTypeInfos) {
+      doTests(
+          random, aggregationName, typeInfo);
+    }
+  }
+
+  private static TypeInfo[] decimalTypeInfos = new TypeInfo[] {
+    new DecimalTypeInfo(38, 18),
+    new DecimalTypeInfo(25, 2),
+    new DecimalTypeInfo(19, 4),
+    new DecimalTypeInfo(18, 10),
+    new DecimalTypeInfo(17, 3),
+    new DecimalTypeInfo(12, 2),
+    new DecimalTypeInfo(7, 1)
+  };
+
+  private void doDecimalTests(String aggregationName, Random random)
+      throws Exception {
+    for (TypeInfo typeInfo : decimalTypeInfos) {
+      doTests(
+          random, aggregationName, typeInfo);
+    }
+  }
+
+  private static TypeInfo[] stringFamilyTypeInfos = new TypeInfo[] {
+    TypeInfoFactory.stringTypeInfo,
+    new CharTypeInfo(25),
+    new CharTypeInfo(10),
+    new VarcharTypeInfo(20),
+    new VarcharTypeInfo(15)
+  };
+
+  private void doStringFamilyTests(String aggregationName, Random random)
+      throws Exception {
+    for (TypeInfo typeInfo : stringFamilyTypeInfos) {
+      doTests(
+          random, aggregationName, typeInfo);
+    }
+  }
+
+  public static int getLinearRandomNumber(Random random, int maxSize) {
+    //Get a linearly multiplied random number
+    int randomMultiplier = maxSize * (maxSize + 1) / 2;
+    int randomInt = random.nextInt(randomMultiplier);
+
+    //Linearly iterate through the possible values to find the correct one
+    int linearRandomNumber = 0;
+    for(int i=maxSize; randomInt >= 0; i--){
+        randomInt -= i;
+        linearRandomNumber++;
+    }
+
+    return linearRandomNumber;
+  }
+
+  private static final int TEST_ROW_COUNT = 100000;
+
+  private void doMerge(GenericUDAFEvaluator.Mode mergeUdafEvaluatorMode,
+      Random random,
+      String aggregationName,
+      TypeInfo typeInfo,
+      GenerationSpec keyGenerationSpec,
+      List<String> columns, String[] columnNames,
+      int dataAggrMaxKeyCount, int reductionFactor,
+      TypeInfo partial1OutputTypeInfo,
+      Object[] partial1ResultsArray)
+          throws Exception {
+
+    List<GenerationSpec> mergeAggrGenerationSpecList = new ArrayList<GenerationSpec>();
+    List<DataTypePhysicalVariation> mergeDataTypePhysicalVariationList =
+        new ArrayList<DataTypePhysicalVariation>();
+
+    mergeAggrGenerationSpecList.add(keyGenerationSpec);
+    mergeDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
+
+    // Use OMIT for both.  We will fill in the data from the PARTIAL1 results.
+    GenerationSpec mergeGenerationSpec =
+        GenerationSpec.createOmitGeneration(partial1OutputTypeInfo);
+    mergeAggrGenerationSpecList.add(mergeGenerationSpec);
+    mergeDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
+
+    ExprNodeColumnDesc mergeCol1Expr =
+        new ExprNodeColumnDesc(partial1OutputTypeInfo, "col1", "table", false);
+    List<ExprNodeDesc> mergeParameters = new ArrayList<ExprNodeDesc>();
+    mergeParameters.add(mergeCol1Expr);
+    final int mergeParameterCount = mergeParameters.size();
+    ObjectInspector[] mergeParameterObjectInspectors =
+        new ObjectInspector[mergeParameterCount];
+    for (int i = 0; i < mergeParameterCount; i++) {
+      TypeInfo paramTypeInfo = mergeParameters.get(i).getTypeInfo();
+      mergeParameterObjectInspectors[i] = TypeInfoUtils
+          .getStandardWritableObjectInspectorFromTypeInfo(paramTypeInfo);
+    }
+
+    VectorRandomRowSource mergeRowSource = new VectorRandomRowSource();
+
+    mergeRowSource.initGenerationSpecSchema(
+        random, mergeAggrGenerationSpecList, /* maxComplexDepth */ 0, /* allowNull */ false,
+        mergeDataTypePhysicalVariationList);
+
+    Object[][] mergeRandomRows = mergeRowSource.randomRows(TEST_ROW_COUNT);
+
+    // Reduce the key range to cause there to be work for each PARTIAL2 key.
+    final int mergeMaxKeyCount = dataAggrMaxKeyCount / reductionFactor;
+
+    Object[] partial1Results = (Object[]) partial1ResultsArray[0];
+
+    short partial1Key = 0;
+    for (int i = 0; i < mergeRandomRows.length; i++) {
+      // Find a non-NULL entry...
+      while (true) {
+        if (partial1Key >= dataAggrMaxKeyCount) {
+          partial1Key = 0;
+        }
+        if (partial1Results[partial1Key] != null) {
+          break;
+        }
+        partial1Key++;
+      }
+      final short mergeKey = (short) (partial1Key % mergeMaxKeyCount);
+      mergeRandomRows[i][0] = new ShortWritable(mergeKey);
+      mergeRandomRows[i][1] = partial1Results[partial1Key];
+      partial1Key++;
+    }
+
+    VectorRandomBatchSource mergeBatchSource =
+        VectorRandomBatchSource.createInterestingBatches(
+            random,
+            mergeRowSource,
+            mergeRandomRows,
+            null);
+
+    // We need to pass the original TypeInfo in for initializing the evaluator.
+    GenericUDAFEvaluator mergeEvaluator =
+        getEvaluator(aggregationName, typeInfo);
+
+    /*
+    System.out.println(
+        "*DEBUG* GenericUDAFEvaluator for " + aggregationName + ", " + typeInfo.getTypeName() + ": " +
+            mergeEvaluator.getClass().getSimpleName());
+    */
+
+    // The only way to get the return object inspector (and its return type) is to
+    // initialize it...
+
+    ObjectInspector mergeReturnOI =
+        mergeEvaluator.init(
+            mergeUdafEvaluatorMode,
+            mergeParameterObjectInspectors);
+    TypeInfo mergeOutputTypeInfo =
+        TypeInfoUtils.getTypeInfoFromObjectInspector(mergeReturnOI);
+
+    Object[] mergeResultsArray = new Object[AggregationTestMode.count];
+
+    executeAggregationTests(
+        aggregationName,
+        partial1OutputTypeInfo,
+        mergeEvaluator,
+        mergeOutputTypeInfo,
+        mergeUdafEvaluatorMode,
+        mergeMaxKeyCount,
+        columns,
+        columnNames,
+        mergeParameters,
+        mergeRandomRows,
+        mergeRowSource,
+        mergeBatchSource,
+        mergeResultsArray);
+
+    verifyAggregationResults(
+        partial1OutputTypeInfo,
+        mergeOutputTypeInfo,
+        mergeMaxKeyCount,
+        mergeUdafEvaluatorMode,
+        mergeResultsArray);
+  }
+
+  private void doTests(Random random, String aggregationName, TypeInfo typeInfo)
+      throws Exception {
+
+    List<GenerationSpec> dataAggrGenerationSpecList = new ArrayList<GenerationSpec>();
+    List<DataTypePhysicalVariation> explicitDataTypePhysicalVariationList =
+        new ArrayList<DataTypePhysicalVariation>();
+
+    TypeInfo keyTypeInfo = TypeInfoFactory.shortTypeInfo;
+    GenerationSpec keyGenerationSpec = GenerationSpec.createOmitGeneration(keyTypeInfo);
+    dataAggrGenerationSpecList.add(keyGenerationSpec);
+    explicitDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
+
+    GenerationSpec generationSpec = GenerationSpec.createSameType(typeInfo);
+    dataAggrGenerationSpecList.add(generationSpec);
+    explicitDataTypePhysicalVariationList.add(DataTypePhysicalVariation.NONE);
+
+    List<String> columns = new ArrayList<String>();
+    columns.add("col0");
+    columns.add("col1");
+
+    ExprNodeColumnDesc dataAggrCol1Expr = new ExprNodeColumnDesc(typeInfo, "col1", "table", false);
+    List<ExprNodeDesc> dataAggrParameters = new ArrayList<ExprNodeDesc>();
+    dataAggrParameters.add(dataAggrCol1Expr);
+    final int dataAggrParameterCount = dataAggrParameters.size();
+    ObjectInspector[] dataAggrParameterObjectInspectors = new ObjectInspector[dataAggrParameterCount];
+    for (int i = 0; i < dataAggrParameterCount; i++) {
+      TypeInfo paramTypeInfo = dataAggrParameters.get(i).getTypeInfo();
+      dataAggrParameterObjectInspectors[i] = TypeInfoUtils
+          .getStandardWritableObjectInspectorFromTypeInfo(paramTypeInfo);
+    }
+
+    String[] columnNames = columns.toArray(new String[0]);
+
+    final int dataAggrMaxKeyCount = 20000;
+    final int reductionFactor = 16;
+
+    ObjectInspector keyObjectInspector = VectorRandomRowSource.getObjectInspector(keyTypeInfo);
+
+    /*
+     * PARTIAL1.
+     */
+
+    VectorRandomRowSource partial1RowSource = new VectorRandomRowSource();
+
+    partial1RowSource.initGenerationSpecSchema(
+        random, dataAggrGenerationSpecList, /* maxComplexDepth */ 0, /* allowNull */ true,
+        explicitDataTypePhysicalVariationList);
+
+    Object[][] partial1RandomRows = partial1RowSource.randomRows(TEST_ROW_COUNT);
+
+    final int partial1RowCount = partial1RandomRows.length;
+    for (int i = 0; i < partial1RowCount; i++) {
+      final short shortKey = (short) getLinearRandomNumber(random, dataAggrMaxKeyCount);
+      partial1RandomRows[i][0] =
+         ((WritableShortObjectInspector) keyObjectInspector).create((short) shortKey);
+    }
+
+    VectorRandomBatchSource partial1BatchSource =
+        VectorRandomBatchSource.createInterestingBatches(
+            random,
+            partial1RowSource,
+            partial1RandomRows,
+            null);
+
+    GenericUDAFEvaluator partial1Evaluator = getEvaluator(aggregationName, typeInfo);
+
+    /*
+    System.out.println(
+        "*DEBUG* GenericUDAFEvaluator for " + aggregationName + ", " + typeInfo.getTypeName() + ": " +
+            partial1Evaluator.getClass().getSimpleName());
+    */
+
+    // The only way to get the return object inspector (and its return type) is to
+    // initialize it...
+    final GenericUDAFEvaluator.Mode partial1UdafEvaluatorMode = GenericUDAFEvaluator.Mode.PARTIAL1;
+    ObjectInspector partial1ReturnOI =
+        partial1Evaluator.init(
+            partial1UdafEvaluatorMode,
+            dataAggrParameterObjectInspectors);
+    TypeInfo partial1OutputTypeInfo =
+        TypeInfoUtils.getTypeInfoFromObjectInspector(partial1ReturnOI);
+
+    Object[] partial1ResultsArray = new Object[AggregationTestMode.count];
+
+    executeAggregationTests(
+        aggregationName,
+        typeInfo,
+        partial1Evaluator,
+        partial1OutputTypeInfo,
+        partial1UdafEvaluatorMode,
+        dataAggrMaxKeyCount,
+        columns,
+        columnNames,
+        dataAggrParameters,
+        partial1RandomRows,
+        partial1RowSource,
+        partial1BatchSource,
+        partial1ResultsArray);
+
+    verifyAggregationResults(
+        typeInfo,
+        partial1OutputTypeInfo,
+        dataAggrMaxKeyCount,
+        partial1UdafEvaluatorMode,
+        partial1ResultsArray);
+
+    final boolean hasDifferentCompleteExpr;
+    if (varianceNames.contains(aggregationName)) {
+      hasDifferentCompleteExpr = true;
+    } else {
+      switch (aggregationName) {
+      case "avg":
+        /*
+        if (typeInfo instanceof DecimalTypeInfo) {
+          // UNDONE: Row-mode GenericUDAFAverage does not call enforcePrecisionScale...
+          hasDifferentCompleteExpr = false;
+        } else {
+          hasDifferentCompleteExpr = true;
+        }
+        */
+        hasDifferentCompleteExpr = true;
+        break;
+      case "count":
+      case "max":
+      case "min":
+      case "sum":
+        hasDifferentCompleteExpr = false;
+        break;
+      default:
+        throw new RuntimeException("Unexpected aggregation name " + aggregationName);
+      }
+    }
+
+    if (hasDifferentCompleteExpr) {
+
+      /*
+       * COMPLETE.
+       */
+
+      VectorRandomRowSource completeRowSource = new VectorRandomRowSource();
+
+      completeRowSource.initGenerationSpecSchema(
+          random, dataAggrGenerationSpecList, /* maxComplexDepth */ 0, /* allowNull */ true,
+          explicitDataTypePhysicalVariationList);
+
+      Object[][] completeRandomRows = completeRowSource.randomRows(TEST_ROW_COUNT);
+
+      final int completeRowCount = completeRandomRows.length;
+      for (int i = 0; i < completeRowCount; i++) {
+        final short shortKey = (short) getLinearRandomNumber(random, dataAggrMaxKeyCount);
+        completeRandomRows[i][0] =
+           ((WritableShortObjectInspector) keyObjectInspector).create((short) shortKey);
+      }
+
+      VectorRandomBatchSource completeBatchSource =
+          VectorRandomBatchSource.createInterestingBatches(
+              random,
+              completeRowSource,
+              completeRandomRows,
+              null);
+
+      GenericUDAFEvaluator completeEvaluator = getEvaluator(aggregationName, typeInfo);
+
+      /*
+      System.out.println(
+          "*DEBUG* GenericUDAFEvaluator for " + aggregationName + ", " + typeInfo.getTypeName() + ": " +
+              completeEvaluator.getClass().getSimpleName());
+      */
+
+      // The only way to get the return object inspector (and its return type) is to
+      // initialize it...
+      final GenericUDAFEvaluator.Mode completeUdafEvaluatorMode = GenericUDAFEvaluator.Mode.COMPLETE;
+      ObjectInspector completeReturnOI =
+          completeEvaluator.init(
+              completeUdafEvaluatorMode,
+              dataAggrParameterObjectInspectors);
+      TypeInfo completeOutputTypeInfo =
+          TypeInfoUtils.getTypeInfoFromObjectInspector(completeReturnOI);
+
+      Object[] completeResultsArray = new Object[AggregationTestMode.count];
+
+      executeAggregationTests(
+          aggregationName,
+          typeInfo,
+          completeEvaluator,
+          completeOutputTypeInfo,
+          completeUdafEvaluatorMode,
+          dataAggrMaxKeyCount,
+          columns,
+          columnNames,
+          dataAggrParameters,
+          completeRandomRows,
+          completeRowSource,
+          completeBatchSource,
+          completeResultsArray);
+
+      verifyAggregationResults(
+          typeInfo,
+          completeOutputTypeInfo,
+          dataAggrMaxKeyCount,
+          completeUdafEvaluatorMode,
+          completeResultsArray);
+    }
+
+    final boolean hasDifferentPartial2Expr;
+    if (varianceNames.contains(aggregationName)) {
+      hasDifferentPartial2Expr = true;
+    } else {
+      switch (aggregationName) {
+      case "avg":
+        hasDifferentPartial2Expr = true;
+        break;
+      case "count":
+      case "max":
+      case "min":
+      case "sum":
+        hasDifferentPartial2Expr = false;
+        break;
+      default:
+        throw new RuntimeException("Unexpected aggregation name " + aggregationName);
+      }
+    }
+
+    // NOTE: the "&& false" disables the PARTIAL2 merge path below.
+    if (hasDifferentPartial2Expr && false) {
+
+      /*
+       * PARTIAL2.
+       */
+
+      final GenericUDAFEvaluator.Mode mergeUdafEvaluatorMode = GenericUDAFEvaluator.Mode.PARTIAL2;
+
+      doMerge(
+          mergeUdafEvaluatorMode,
+          random,
+          aggregationName,
+          typeInfo,
+          keyGenerationSpec,
+          columns, columnNames,
+          dataAggrMaxKeyCount,
+          reductionFactor,
+          partial1OutputTypeInfo,
+          partial1ResultsArray);
+    }
+
+    final boolean hasDifferentFinalExpr;
+    if (varianceNames.contains(aggregationName)) {
+      hasDifferentFinalExpr = true;
+    } else {
+      switch (aggregationName) {
+      case "avg":
+        hasDifferentFinalExpr = true;
+        break;
+      case "count":
+        hasDifferentFinalExpr = true;
+        break;
+      case "max":
+      case "min":
+      case "sum":
+        hasDifferentFinalExpr = false;
+        break;
+      default:
+        throw new RuntimeException("Unexpected aggregation name " + aggregationName);
+      }
+    }
+    if (hasDifferentFinalExpr) {
+
+      /*
+       * FINAL.
+       */
+
+      final GenericUDAFEvaluator.Mode mergeUdafEvaluatorMode = GenericUDAFEvaluator.Mode.FINAL;
+
+      doMerge(
+          mergeUdafEvaluatorMode,
+          random,
+          aggregationName,
+          typeInfo,
+          keyGenerationSpec,
+          columns, columnNames,
+          dataAggrMaxKeyCount,
+          reductionFactor,
+          partial1OutputTypeInfo,
+          partial1ResultsArray);
+    }
+  }
+}
\ No newline at end of file

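Both doMerge and doTests above lean on the same detail: a GenericUDAFEvaluator only exposes its output type after init() has been called with the evaluator mode and the parameter ObjectInspectors. A minimal sketch of that pattern follows, assuming the Hive exec and serde classes are on the classpath; the class name and helper signature are illustrative only and not part of the patch.

  import org.apache.hadoop.hive.ql.metadata.HiveException;
  import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
  import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
  import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
  import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

  public class UdafOutputTypeSketch {

    /**
     * Initializes the evaluator for the given mode and returns the TypeInfo it
     * will produce.  For PARTIAL1/COMPLETE the parameters are the original data
     * types; for PARTIAL2/FINAL the single parameter is the partial-result type.
     */
    public static TypeInfo outputTypeInfo(GenericUDAFEvaluator evaluator,
        GenericUDAFEvaluator.Mode mode, TypeInfo... parameterTypeInfos)
        throws HiveException {
      ObjectInspector[] parameterOIs = new ObjectInspector[parameterTypeInfos.length];
      for (int i = 0; i < parameterTypeInfos.length; i++) {
        // Standard writable object inspectors, matching what the tests above pass to init().
        parameterOIs[i] = TypeInfoUtils
            .getStandardWritableObjectInspectorFromTypeInfo(parameterTypeInfos[i]);
      }
      ObjectInspector returnOI = evaluator.init(mode, parameterOIs);
      return TypeInfoUtils.getTypeInfoFromObjectInspector(returnOI);
    }
  }

For instance, the FINAL-mode merge step in doMerge corresponds to outputTypeInfo(evaluator, GenericUDAFEvaluator.Mode.FINAL, partial1OutputTypeInfo), while the PARTIAL1 step passes the original data type instead.
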
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateAddSub.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateAddSub.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateAddSub.java
index f5deca5..c4146be 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateAddSub.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateAddSub.java
@@ -370,6 +370,7 @@ public class TestVectorDateAddSub {
       Object[][] randomRows, ColumnScalarMode columnScalarMode,
       ObjectInspector rowInspector, Object[] resultObjects) throws Exception {
 
+    /*
     System.out.println(
         "*DEBUG* dateTimeStringTypeInfo " + dateTimeStringTypeInfo.toString() +
         " integerTypeInfo " + integerTypeInfo +
@@ -377,6 +378,7 @@ public class TestVectorDateAddSub {
         " dateAddSubTestMode ROW_MODE" +
         " columnScalarMode " + columnScalarMode +
         " exprDesc " + exprDesc.toString());
+    */
 
     HiveConf hiveConf = new HiveConf();
     ExprNodeEvaluator evaluator =

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateDiff.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateDiff.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateDiff.java
index dce7ccf..b382c2a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateDiff.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorDateDiff.java
@@ -362,12 +362,14 @@ public class TestVectorDateDiff {
       Object[][] randomRows, ColumnScalarMode columnScalarMode,
       ObjectInspector rowInspector, Object[] resultObjects) throws Exception {
 
+    /*
     System.out.println(
         "*DEBUG* dateTimeStringTypeInfo " + dateTimeStringTypeInfo1.toString() +
         " dateTimeStringTypeInfo2 " + dateTimeStringTypeInfo2 +
         " dateDiffTestMode ROW_MODE" +
         " columnScalarMode " + columnScalarMode +
         " exprDesc " + exprDesc.toString());
+    */
 
     HiveConf hiveConf = new HiveConf();
     ExprNodeEvaluator evaluator =

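The two hunks above silence the per-row debug output by wrapping the System.out.println calls in block comments. A small alternative sketch, purely illustrative and not part of the patch: keeping such diagnostics behind a static flag so they stay compiled but are skipped by default.

  public class DebugTrace {

    // Flip to true locally while chasing a test failure; left false so normal runs stay quiet.
    private static final boolean DEBUG = false;

    public static void trace(String message) {
      if (DEBUG) {
        System.out.println(message);
      }
    }
  }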

[13/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query24.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query24.q.out b/ql/src/test/results/clientpositive/perf/tez/query24.q.out
index 9fcec42..349d429 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query24.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query24.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[286][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 8' is a cross product
+Warning: Shuffle Join MERGEJOIN[290][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 8' is a cross product
 PREHOOK: query: explain
 with ssales as
 (select c_last_name
@@ -137,281 +137,281 @@ Stage-0
           Output:["_col0","_col1","_col2","_col3"]
           Filter Operator [FIL_89] (rows=77303902 width=321)
             predicate:(_col3 > _col4)
-            Merge Join Operator [MERGEJOIN_286] (rows=231911707 width=321)
+            Merge Join Operator [MERGEJOIN_290] (rows=231911707 width=321)
               Conds:(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
             <-Reducer 15 [CUSTOM_SIMPLE_EDGE] vectorized
-              PARTITION_ONLY_SHUFFLE [RS_376]
-                Select Operator [SEL_375] (rows=1 width=232)
+              PARTITION_ONLY_SHUFFLE [RS_380]
+                Select Operator [SEL_379] (rows=1 width=232)
                   Output:["_col0"]
-                  Group By Operator [GBY_374] (rows=1 width=232)
+                  Group By Operator [GBY_378] (rows=1 width=232)
                     Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"]
                   <-Reducer 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                    PARTITION_ONLY_SHUFFLE [RS_373]
-                      Group By Operator [GBY_372] (rows=1 width=232)
+                    PARTITION_ONLY_SHUFFLE [RS_377]
+                      Group By Operator [GBY_376] (rows=1 width=232)
                         Output:["_col0","_col1"],aggregations:["sum(_col10)","count(_col10)"]
-                        Select Operator [SEL_371] (rows=463823414 width=88)
+                        Select Operator [SEL_375] (rows=463823414 width=88)
                           Output:["_col10"]
-                          Group By Operator [GBY_370] (rows=463823414 width=88)
+                          Group By Operator [GBY_374] (rows=463823414 width=88)
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9
                           <-Reducer 13 [SIMPLE_EDGE]
                             SHUFFLE [RS_78]
                               PartitionCols:_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
                               Group By Operator [GBY_77] (rows=927646829 width=88)
                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"],aggregations:["sum(_col4)"],keys:_col11, _col12, _col6, _col8, _col15, _col16, _col17, _col18, _col19, _col22
-                                Merge Join Operator [MERGEJOIN_285] (rows=927646829 width=88)
-                                  Conds:RS_73._col9, _col13=RS_351._col1, upper(_col2)(Inner),Output:["_col4","_col6","_col8","_col11","_col12","_col15","_col16","_col17","_col18","_col19","_col22"]
+                                Merge Join Operator [MERGEJOIN_289] (rows=927646829 width=88)
+                                  Conds:RS_73._col9, _col13=RS_355._col1, upper(_col2)(Inner),Output:["_col4","_col6","_col8","_col11","_col12","_col15","_col16","_col17","_col18","_col19","_col22"]
                                 <-Map 30 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_351]
+                                  SHUFFLE [RS_355]
                                     PartitionCols:_col1, upper(_col2)
-                                    Select Operator [SEL_349] (rows=40000000 width=1014)
+                                    Select Operator [SEL_353] (rows=40000000 width=1014)
                                       Output:["_col0","_col1","_col2"]
-                                      Filter Operator [FIL_348] (rows=40000000 width=1014)
+                                      Filter Operator [FIL_352] (rows=40000000 width=1014)
                                         predicate:(ca_zip is not null and upper(ca_country) is not null)
                                         TableScan [TS_15] (rows=40000000 width=1014)
                                           default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_state","ca_zip","ca_country"]
                                 <-Reducer 12 [SIMPLE_EDGE]
                                   SHUFFLE [RS_73]
                                     PartitionCols:_col9, _col13
-                                    Merge Join Operator [MERGEJOIN_284] (rows=843315281 width=88)
-                                      Conds:RS_70._col0, _col3=RS_330._col0, _col1(Inner),Output:["_col4","_col6","_col8","_col9","_col11","_col12","_col13","_col15","_col16","_col17","_col18","_col19"]
+                                    Merge Join Operator [MERGEJOIN_288] (rows=843315281 width=88)
+                                      Conds:RS_70._col0, _col3=RS_334._col0, _col1(Inner),Output:["_col4","_col6","_col8","_col9","_col11","_col12","_col13","_col15","_col16","_col17","_col18","_col19"]
                                     <-Map 25 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_330]
+                                      SHUFFLE [RS_334]
                                         PartitionCols:_col0, _col1
-                                        Select Operator [SEL_326] (rows=57591150 width=77)
+                                        Select Operator [SEL_330] (rows=57591150 width=77)
                                           Output:["_col0","_col1"]
-                                          Filter Operator [FIL_325] (rows=57591150 width=77)
+                                          Filter Operator [FIL_329] (rows=57591150 width=77)
                                             predicate:(sr_item_sk is not null and sr_ticket_number is not null)
                                             TableScan [TS_12] (rows=57591150 width=77)
                                               default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number"]
                                     <-Reducer 11 [SIMPLE_EDGE]
                                       SHUFFLE [RS_70]
                                         PartitionCols:_col0, _col3
-                                        Merge Join Operator [MERGEJOIN_283] (rows=766650239 width=88)
-                                          Conds:RS_67._col0=RS_293._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col8","_col9","_col11","_col12","_col13","_col15","_col16","_col17","_col18","_col19"]
+                                        Merge Join Operator [MERGEJOIN_287] (rows=766650239 width=88)
+                                          Conds:RS_67._col0=RS_297._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col8","_col9","_col11","_col12","_col13","_col15","_col16","_col17","_col18","_col19"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_293]
+                                          SHUFFLE [RS_297]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_290] (rows=462000 width=1436)
+                                            Select Operator [SEL_294] (rows=462000 width=1436)
                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                              Filter Operator [FIL_288] (rows=462000 width=1436)
+                                              Filter Operator [FIL_292] (rows=462000 width=1436)
                                                 predicate:i_item_sk is not null
                                                 TableScan [TS_3] (rows=462000 width=1436)
                                                   default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_size","i_color","i_units","i_manager_id"]
                                         <-Reducer 20 [SIMPLE_EDGE]
                                           SHUFFLE [RS_67]
                                             PartitionCols:_col0
-                                            Merge Join Operator [MERGEJOIN_282] (rows=696954748 width=88)
-                                              Conds:RS_64._col1=RS_317._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col8","_col9","_col11","_col12","_col13"]
+                                            Merge Join Operator [MERGEJOIN_286] (rows=696954748 width=88)
+                                              Conds:RS_64._col1=RS_321._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col8","_col9","_col11","_col12","_col13"]
                                             <-Map 22 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_317]
+                                              SHUFFLE [RS_321]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_314] (rows=80000000 width=860)
+                                                Select Operator [SEL_318] (rows=80000000 width=860)
                                                   Output:["_col0","_col1","_col2","_col3"]
-                                                  Filter Operator [FIL_313] (rows=80000000 width=860)
+                                                  Filter Operator [FIL_317] (rows=80000000 width=860)
                                                     predicate:(c_birth_country is not null and c_customer_sk is not null)
                                                     TableScan [TS_9] (rows=80000000 width=860)
                                                       default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_first_name","c_last_name","c_birth_country"]
                                             <-Reducer 19 [SIMPLE_EDGE]
                                               SHUFFLE [RS_64]
                                                 PartitionCols:_col1
-                                                Merge Join Operator [MERGEJOIN_281] (rows=633595212 width=88)
-                                                  Conds:RS_369._col2=RS_305._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col6","_col8","_col9"]
+                                                Merge Join Operator [MERGEJOIN_285] (rows=633595212 width=88)
+                                                  Conds:RS_373._col2=RS_309._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col6","_col8","_col9"]
                                                 <-Map 17 [SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_305]
+                                                  SHUFFLE [RS_309]
                                                     PartitionCols:_col0
-                                                    Select Operator [SEL_302] (rows=852 width=1910)
+                                                    Select Operator [SEL_306] (rows=852 width=1910)
                                                       Output:["_col0","_col1","_col3","_col4"]
-                                                      Filter Operator [FIL_301] (rows=852 width=1910)
+                                                      Filter Operator [FIL_305] (rows=852 width=1910)
                                                         predicate:((s_market_id = 7) and s_store_sk is not null and s_zip is not null)
                                                         TableScan [TS_6] (rows=1704 width=1910)
                                                           default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_market_id","s_state","s_zip"]
                                                 <-Map 31 [SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_369]
+                                                  SHUFFLE [RS_373]
                                                     PartitionCols:_col2
-                                                    Select Operator [SEL_368] (rows=575995635 width=88)
+                                                    Select Operator [SEL_372] (rows=575995635 width=88)
                                                       Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                      Filter Operator [FIL_367] (rows=575995635 width=88)
+                                                      Filter Operator [FIL_371] (rows=575995635 width=88)
                                                         predicate:((ss_customer_sk BETWEEN DynamicValue(RS_65_customer_c_customer_sk_min) AND DynamicValue(RS_65_customer_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_65_customer_c_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_68_item_i_item_sk_min) AND DynamicValue(RS_68_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_68_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_71_store_returns_sr_item_sk_min) AND DynamicValue(RS_71_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_71_store_returns_sr_item_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_62_store_s_store_sk_min) AND DynamicValue(RS_62_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_62_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_71_store_returns_sr_ticket_number_min) AND DynamicValue(RS_71_stor
 e_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_71_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                                         TableScan [TS_43] (rows=575995635 width=88)
                                                           default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_sales_price"]
                                                         <-Reducer 16 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_362]
-                                                            Group By Operator [GBY_361] (rows=1 width=12)
+                                                          BROADCAST [RS_366]
+                                                            Group By Operator [GBY_365] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                             <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_298]
-                                                                Group By Operator [GBY_296] (rows=1 width=12)
+                                                              SHUFFLE [RS_302]
+                                                                Group By Operator [GBY_300] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                  Select Operator [SEL_294] (rows=462000 width=1436)
+                                                                  Select Operator [SEL_298] (rows=462000 width=1436)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_290]
+                                                                     Please refer to the previous Select Operator [SEL_294]
                                                         <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_358]
-                                                            Group By Operator [GBY_357] (rows=1 width=12)
+                                                          BROADCAST [RS_362]
+                                                            Group By Operator [GBY_361] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                             <-Map 17 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_310]
-                                                                Group By Operator [GBY_308] (rows=1 width=12)
+                                                              SHUFFLE [RS_314]
+                                                                Group By Operator [GBY_312] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                  Select Operator [SEL_306] (rows=852 width=1910)
+                                                                  Select Operator [SEL_310] (rows=852 width=1910)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_302]
+                                                                     Please refer to the previous Select Operator [SEL_306]
                                                         <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_360]
-                                                            Group By Operator [GBY_359] (rows=1 width=12)
+                                                          BROADCAST [RS_364]
+                                                            Group By Operator [GBY_363] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=80000000)"]
                                                             <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_322]
-                                                                Group By Operator [GBY_320] (rows=1 width=12)
+                                                              SHUFFLE [RS_326]
+                                                                Group By Operator [GBY_324] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=80000000)"]
-                                                                  Select Operator [SEL_318] (rows=80000000 width=860)
+                                                                  Select Operator [SEL_322] (rows=80000000 width=860)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_314]
+                                                                     Please refer to the previous Select Operator [SEL_318]
                                                         <-Reducer 28 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_364]
-                                                            Group By Operator [GBY_363] (rows=1 width=12)
+                                                          BROADCAST [RS_368]
+                                                            Group By Operator [GBY_367] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
                                                             <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_339]
-                                                                Group By Operator [GBY_335] (rows=1 width=12)
+                                                              SHUFFLE [RS_343]
+                                                                Group By Operator [GBY_339] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                                  Select Operator [SEL_331] (rows=57591150 width=77)
+                                                                  Select Operator [SEL_335] (rows=57591150 width=77)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_326]
+                                                                     Please refer to the previous Select Operator [SEL_330]
                                                         <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_366]
-                                                            Group By Operator [GBY_365] (rows=1 width=12)
+                                                          BROADCAST [RS_370]
+                                                            Group By Operator [GBY_369] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
                                                             <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_340]
-                                                                Group By Operator [GBY_336] (rows=1 width=12)
+                                                              SHUFFLE [RS_344]
+                                                                Group By Operator [GBY_340] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                                  Select Operator [SEL_332] (rows=57591150 width=77)
+                                                                  Select Operator [SEL_336] (rows=57591150 width=77)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_326]
+                                                                     Please refer to the previous Select Operator [SEL_330]
             <-Reducer 7 [CUSTOM_SIMPLE_EDGE] vectorized
-              PARTITION_ONLY_SHUFFLE [RS_356]
-                Select Operator [SEL_355] (rows=231911707 width=88)
+              PARTITION_ONLY_SHUFFLE [RS_360]
+                Select Operator [SEL_359] (rows=231911707 width=88)
                   Output:["_col0","_col1","_col2","_col3"]
-                  Group By Operator [GBY_354] (rows=231911707 width=88)
+                  Group By Operator [GBY_358] (rows=231911707 width=88)
                     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col9)"],keys:_col1, _col2, _col7
-                    Select Operator [SEL_353] (rows=463823414 width=88)
+                    Select Operator [SEL_357] (rows=463823414 width=88)
                       Output:["_col1","_col2","_col7","_col9"]
-                      Group By Operator [GBY_352] (rows=463823414 width=88)
+                      Group By Operator [GBY_356] (rows=463823414 width=88)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8
                       <-Reducer 6 [SIMPLE_EDGE]
                         SHUFFLE [RS_35]
                           PartitionCols:_col0, _col1, _col2
                           Group By Operator [GBY_34] (rows=927646829 width=88)
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"],aggregations:["sum(_col4)"],keys:_col17, _col18, _col12, _col22, _col6, _col7, _col9, _col10, _col14
-                            Merge Join Operator [MERGEJOIN_280] (rows=927646829 width=88)
-                              Conds:RS_30._col15, _col19=RS_350._col1, upper(_col2)(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col17","_col18","_col22"]
+                            Merge Join Operator [MERGEJOIN_284] (rows=927646829 width=88)
+                              Conds:RS_30._col15, _col19=RS_354._col1, upper(_col2)(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col17","_col18","_col22"]
                             <-Map 30 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_350]
+                              SHUFFLE [RS_354]
                                 PartitionCols:_col1, upper(_col2)
-                                 Please refer to the previous Select Operator [SEL_349]
+                                 Please refer to the previous Select Operator [SEL_353]
                             <-Reducer 5 [SIMPLE_EDGE]
                               SHUFFLE [RS_30]
                                 PartitionCols:_col15, _col19
-                                Merge Join Operator [MERGEJOIN_279] (rows=843315281 width=88)
-                                  Conds:RS_27._col0, _col3=RS_327._col0, _col1(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15","_col17","_col18","_col19"]
+                                Merge Join Operator [MERGEJOIN_283] (rows=843315281 width=88)
+                                  Conds:RS_27._col0, _col3=RS_331._col0, _col1(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15","_col17","_col18","_col19"]
                                 <-Map 25 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_327]
+                                  SHUFFLE [RS_331]
                                     PartitionCols:_col0, _col1
-                                     Please refer to the previous Select Operator [SEL_326]
+                                     Please refer to the previous Select Operator [SEL_330]
                                 <-Reducer 4 [SIMPLE_EDGE]
                                   SHUFFLE [RS_27]
                                     PartitionCols:_col0, _col3
-                                    Merge Join Operator [MERGEJOIN_278] (rows=766650239 width=88)
-                                      Conds:RS_24._col1=RS_315._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15","_col17","_col18","_col19"]
+                                    Merge Join Operator [MERGEJOIN_282] (rows=766650239 width=88)
+                                      Conds:RS_24._col1=RS_319._col0(Inner),Output:["_col0","_col3","_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15","_col17","_col18","_col19"]
                                     <-Map 22 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_315]
+                                      SHUFFLE [RS_319]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_314]
+                                         Please refer to the previous Select Operator [SEL_318]
                                     <-Reducer 3 [SIMPLE_EDGE]
                                       SHUFFLE [RS_24]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_277] (rows=696954748 width=88)
-                                          Conds:RS_21._col2=RS_303._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15"]
+                                        Merge Join Operator [MERGEJOIN_281] (rows=696954748 width=88)
+                                          Conds:RS_21._col2=RS_307._col0(Inner),Output:["_col0","_col1","_col3","_col4","_col6","_col7","_col9","_col10","_col12","_col14","_col15"]
                                         <-Map 17 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_303]
+                                          SHUFFLE [RS_307]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_302]
+                                             Please refer to the previous Select Operator [SEL_306]
                                         <-Reducer 2 [SIMPLE_EDGE]
                                           SHUFFLE [RS_21]
                                             PartitionCols:_col2
-                                            Merge Join Operator [MERGEJOIN_276] (rows=633595212 width=88)
-                                              Conds:RS_347._col0=RS_291._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col9","_col10"]
+                                            Merge Join Operator [MERGEJOIN_280] (rows=633595212 width=88)
+                                              Conds:RS_351._col0=RS_295._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col6","_col7","_col9","_col10"]
                                             <-Map 9 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_291]
+                                              SHUFFLE [RS_295]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_289] (rows=231000 width=1436)
+                                                Select Operator [SEL_293] (rows=231000 width=1436)
                                                   Output:["_col0","_col1","_col2","_col4","_col5"]
-                                                  Filter Operator [FIL_287] (rows=231000 width=1436)
+                                                  Filter Operator [FIL_291] (rows=231000 width=1436)
                                                     predicate:((i_color = 'orchid') and i_item_sk is not null)
                                                      Please refer to the previous TableScan [TS_3]
                                             <-Map 1 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_347]
+                                              SHUFFLE [RS_351]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_346] (rows=575995635 width=88)
+                                                Select Operator [SEL_350] (rows=575995635 width=88)
                                                   Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_345] (rows=575995635 width=88)
+                                                  Filter Operator [FIL_349] (rows=575995635 width=88)
                                                     predicate:((ss_customer_sk BETWEEN DynamicValue(RS_25_customer_c_customer_sk_min) AND DynamicValue(RS_25_customer_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_25_customer_c_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_19_item_i_item_sk_min) AND DynamicValue(RS_19_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_19_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_28_store_returns_sr_item_sk_min) AND DynamicValue(RS_28_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_28_store_returns_sr_item_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_22_store_s_store_sk_min) AND DynamicValue(RS_22_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_22_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_28_store_returns_sr_ticket_number_min) AND DynamicValue(RS_28_store_re
 turns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_28_store_returns_sr_ticket_number_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                                     TableScan [TS_0] (rows=575995635 width=88)
                                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_sales_price"]
                                                     <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_300]
-                                                        Group By Operator [GBY_299] (rows=1 width=12)
+                                                      BROADCAST [RS_304]
+                                                        Group By Operator [GBY_303] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                         <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_297]
-                                                            Group By Operator [GBY_295] (rows=1 width=12)
+                                                          SHUFFLE [RS_301]
+                                                            Group By Operator [GBY_299] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                              Select Operator [SEL_292] (rows=231000 width=1436)
+                                                              Select Operator [SEL_296] (rows=231000 width=1436)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_289]
+                                                                 Please refer to the previous Select Operator [SEL_293]
                                                     <-Reducer 18 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_312]
-                                                        Group By Operator [GBY_311] (rows=1 width=12)
+                                                      BROADCAST [RS_316]
+                                                        Group By Operator [GBY_315] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                         <-Map 17 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_309]
-                                                            Group By Operator [GBY_307] (rows=1 width=12)
+                                                          SHUFFLE [RS_313]
+                                                            Group By Operator [GBY_311] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                              Select Operator [SEL_304] (rows=852 width=1910)
+                                                              Select Operator [SEL_308] (rows=852 width=1910)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_302]
+                                                                 Please refer to the previous Select Operator [SEL_306]
                                                     <-Reducer 23 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_324]
-                                                        Group By Operator [GBY_323] (rows=1 width=12)
+                                                      BROADCAST [RS_328]
+                                                        Group By Operator [GBY_327] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=80000000)"]
                                                         <-Map 22 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_321]
-                                                            Group By Operator [GBY_319] (rows=1 width=12)
+                                                          SHUFFLE [RS_325]
+                                                            Group By Operator [GBY_323] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=80000000)"]
-                                                              Select Operator [SEL_316] (rows=80000000 width=860)
+                                                              Select Operator [SEL_320] (rows=80000000 width=860)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_314]
+                                                                 Please refer to the previous Select Operator [SEL_318]
                                                     <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_342]
-                                                        Group By Operator [GBY_341] (rows=1 width=12)
+                                                      BROADCAST [RS_346]
+                                                        Group By Operator [GBY_345] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
                                                         <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_337]
-                                                            Group By Operator [GBY_333] (rows=1 width=12)
+                                                          SHUFFLE [RS_341]
+                                                            Group By Operator [GBY_337] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                              Select Operator [SEL_328] (rows=57591150 width=77)
+                                                              Select Operator [SEL_332] (rows=57591150 width=77)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_326]
+                                                                 Please refer to the previous Select Operator [SEL_330]
                                                     <-Reducer 27 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_344]
-                                                        Group By Operator [GBY_343] (rows=1 width=12)
+                                                      BROADCAST [RS_348]
+                                                        Group By Operator [GBY_347] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
                                                         <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_338]
-                                                            Group By Operator [GBY_334] (rows=1 width=12)
+                                                          SHUFFLE [RS_342]
+                                                            Group By Operator [GBY_338] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                              Select Operator [SEL_329] (rows=57591150 width=77)
+                                                              Select Operator [SEL_333] (rows=57591150 width=77)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_326]
+                                                                 Please refer to the previous Select Operator [SEL_330]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query25.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query25.q.out b/ql/src/test/results/clientpositive/perf/tez/query25.q.out
index a885cf3..77a9a07 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query25.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query25.q.out
@@ -95,7 +95,7 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 1 <- Reducer 19 (BROADCAST_EDGE), Reducer 21 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
+Map 1 <- Reducer 14 (BROADCAST_EDGE), Reducer 15 (BROADCAST_EDGE), Reducer 19 (BROADCAST_EDGE), Reducer 21 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
 Map 16 <- Reducer 12 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE), Reducer 15 (BROADCAST_EDGE), Reducer 19 (BROADCAST_EDGE)
 Reducer 10 <- Map 16 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
 Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 13 (SIMPLE_EDGE)
@@ -118,189 +118,195 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_259]
-        Limit [LIM_258] (rows=100 width=88)
+      File Output Operator [FS_269]
+        Limit [LIM_268] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_257] (rows=421657640 width=88)
+          Select Operator [SEL_267] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_256]
-              Group By Operator [GBY_255] (rows=421657640 width=88)
+            SHUFFLE [RS_266]
+              Group By Operator [GBY_265] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_49]
                   PartitionCols:_col0, _col1, _col2, _col3
                   Group By Operator [GBY_48] (rows=843315281 width=88)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col5)","sum(_col20)","sum(_col12)"],keys:_col25, _col26, _col28, _col29
-                    Merge Join Operator [MERGEJOIN_205] (rows=843315281 width=88)
-                      Conds:RS_44._col3=RS_234._col0(Inner),Output:["_col5","_col12","_col20","_col25","_col26","_col28","_col29"]
+                    Merge Join Operator [MERGEJOIN_213] (rows=843315281 width=88)
+                      Conds:RS_44._col3=RS_251._col0(Inner),Output:["_col5","_col12","_col20","_col25","_col26","_col28","_col29"]
                     <-Map 20 [SIMPLE_EDGE] vectorized
-                      SHUFFLE [RS_234]
+                      SHUFFLE [RS_251]
                         PartitionCols:_col0
-                        Select Operator [SEL_233] (rows=1704 width=1910)
+                        Select Operator [SEL_250] (rows=1704 width=1910)
                           Output:["_col0","_col1","_col2"]
-                          Filter Operator [FIL_232] (rows=1704 width=1910)
+                          Filter Operator [FIL_249] (rows=1704 width=1910)
                             predicate:s_store_sk is not null
                             TableScan [TS_32] (rows=1704 width=1910)
                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
                     <-Reducer 4 [SIMPLE_EDGE]
                       SHUFFLE [RS_44]
                         PartitionCols:_col3
-                        Merge Join Operator [MERGEJOIN_204] (rows=766650239 width=88)
-                          Conds:RS_41._col1=RS_225._col0(Inner),Output:["_col3","_col5","_col12","_col20","_col25","_col26"]
+                        Merge Join Operator [MERGEJOIN_212] (rows=766650239 width=88)
+                          Conds:RS_41._col1=RS_242._col0(Inner),Output:["_col3","_col5","_col12","_col20","_col25","_col26"]
                         <-Map 18 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_225]
+                          SHUFFLE [RS_242]
                             PartitionCols:_col0
-                            Select Operator [SEL_224] (rows=462000 width=1436)
+                            Select Operator [SEL_241] (rows=462000 width=1436)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_223] (rows=462000 width=1436)
+                              Filter Operator [FIL_240] (rows=462000 width=1436)
                                 predicate:i_item_sk is not null
                                 TableScan [TS_29] (rows=462000 width=1436)
                                   default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
                         <-Reducer 3 [SIMPLE_EDGE]
                           SHUFFLE [RS_41]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_203] (rows=696954748 width=88)
+                            Merge Join Operator [MERGEJOIN_211] (rows=696954748 width=88)
                               Conds:RS_38._col1, _col2, _col4=RS_39._col8, _col9, _col10(Inner),Output:["_col1","_col3","_col5","_col12","_col20"]
                             <-Reducer 11 [SIMPLE_EDGE]
                               SHUFFLE [RS_39]
                                 PartitionCols:_col8, _col9, _col10
-                                Merge Join Operator [MERGEJOIN_202] (rows=348467716 width=135)
+                                Merge Join Operator [MERGEJOIN_210] (rows=348467716 width=135)
                                   Conds:RS_25._col2, _col1=RS_26._col1, _col2(Inner),Output:["_col3","_col8","_col9","_col10","_col11"]
                                 <-Reducer 13 [SIMPLE_EDGE]
                                   PARTITION_ONLY_SHUFFLE [RS_26]
                                     PartitionCols:_col1, _col2
-                                    Merge Join Operator [MERGEJOIN_201] (rows=63350266 width=77)
-                                      Conds:RS_247._col0=RS_216._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                    Merge Join Operator [MERGEJOIN_209] (rows=63350266 width=77)
+                                      Conds:RS_233._col0=RS_224._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                                     <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_216]
+                                      PARTITION_ONLY_SHUFFLE [RS_224]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_211] (rows=4058 width=1119)
+                                        Select Operator [SEL_219] (rows=4058 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_208] (rows=4058 width=1119)
+                                          Filter Operator [FIL_216] (rows=4058 width=1119)
                                             predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
                                             TableScan [TS_3] (rows=73049 width=1119)
                                               default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                     <-Map 17 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_247]
+                                      SHUFFLE [RS_233]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_246] (rows=57591150 width=77)
+                                        Select Operator [SEL_232] (rows=57591150 width=77)
                                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                                          Filter Operator [FIL_245] (rows=57591150 width=77)
+                                          Filter Operator [FIL_231] (rows=57591150 width=77)
                                             predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
                                             TableScan [TS_12] (rows=57591150 width=77)
                                               default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_net_loss"]
                                 <-Reducer 10 [SIMPLE_EDGE]
                                   SHUFFLE [RS_25]
                                     PartitionCols:_col2, _col1
-                                    Merge Join Operator [MERGEJOIN_200] (rows=316788826 width=135)
-                                      Conds:RS_254._col0=RS_214._col0(Inner),Output:["_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_208] (rows=316788826 width=135)
+                                      Conds:RS_264._col0=RS_222._col0(Inner),Output:["_col1","_col2","_col3"]
                                     <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_214]
+                                      PARTITION_ONLY_SHUFFLE [RS_222]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_210] (rows=4058 width=1119)
+                                        Select Operator [SEL_218] (rows=4058 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_207] (rows=4058 width=1119)
+                                          Filter Operator [FIL_215] (rows=4058 width=1119)
                                             predicate:((d_year = 2000) and d_date_sk is not null and d_moy BETWEEN 4 AND 10)
                                              Please refer to the previous TableScan [TS_3]
                                     <-Map 16 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_254]
+                                      SHUFFLE [RS_264]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_253] (rows=287989836 width=135)
+                                        Select Operator [SEL_263] (rows=287989836 width=135)
                                           Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_252] (rows=287989836 width=135)
+                                          Filter Operator [FIL_262] (rows=287989836 width=135)
                                             predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_23_d3_d_date_sk_min) AND DynamicValue(RS_23_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_23_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                             TableScan [TS_6] (rows=287989836 width=135)
                                               default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_net_profit"]
-                                            <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_231]
-                                                Group By Operator [GBY_229] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_228]
-                                                    Group By Operator [GBY_227] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_226] (rows=462000 width=1436)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_224]
-                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_244]
-                                                Group By Operator [GBY_243] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_220]
-                                                    Group By Operator [GBY_218] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_215] (rows=4058 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_210]
                                             <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_249]
-                                                Group By Operator [GBY_248] (rows=1 width=12)
+                                              BROADCAST [RS_236]
+                                                Group By Operator [GBY_234] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                 <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_137]
-                                                    Group By Operator [GBY_136] (rows=1 width=12)
+                                                  PARTITION_ONLY_SHUFFLE [RS_107]
+                                                    Group By Operator [GBY_106] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_135] (rows=63350266 width=77)
+                                                      Select Operator [SEL_105] (rows=63350266 width=77)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_201]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
                                             <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_251]
-                                                Group By Operator [GBY_250] (rows=1 width=12)
+                                              BROADCAST [RS_239]
+                                                Group By Operator [GBY_237] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                 <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_142]
-                                                    Group By Operator [GBY_141] (rows=1 width=12)
+                                                  PARTITION_ONLY_SHUFFLE [RS_122]
+                                                    Group By Operator [GBY_121] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_140] (rows=63350266 width=77)
+                                                      Select Operator [SEL_120] (rows=63350266 width=77)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
+                                            <-Reducer 19 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_248]
+                                                Group By Operator [GBY_246] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_245]
+                                                    Group By Operator [GBY_244] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_243] (rows=462000 width=1436)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_201]
+                                                         Please refer to the previous Select Operator [SEL_241]
+                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_261]
+                                                Group By Operator [GBY_260] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_228]
+                                                    Group By Operator [GBY_226] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_223] (rows=4058 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_218]
                             <-Reducer 2 [SIMPLE_EDGE]
                               SHUFFLE [RS_38]
                                 PartitionCols:_col1, _col2, _col4
-                                Merge Join Operator [MERGEJOIN_199] (rows=633595212 width=88)
-                                  Conds:RS_242._col0=RS_212._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                Merge Join Operator [MERGEJOIN_207] (rows=633595212 width=88)
+                                  Conds:RS_259._col0=RS_220._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
                                 <-Map 8 [SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_212]
+                                  PARTITION_ONLY_SHUFFLE [RS_220]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_209] (rows=18262 width=1119)
+                                    Select Operator [SEL_217] (rows=18262 width=1119)
                                       Output:["_col0"]
-                                      Filter Operator [FIL_206] (rows=18262 width=1119)
+                                      Filter Operator [FIL_214] (rows=18262 width=1119)
                                         predicate:((d_moy = 4) and (d_year = 2000) and d_date_sk is not null)
                                          Please refer to the previous TableScan [TS_3]
                                 <-Map 1 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_242]
+                                  SHUFFLE [RS_259]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_241] (rows=575995635 width=88)
+                                    Select Operator [SEL_258] (rows=575995635 width=88)
                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                      Filter Operator [FIL_240] (rows=575995635 width=88)
-                                        predicate:((ss_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                      Filter Operator [FIL_257] (rows=575995635 width=88)
+                                        predicate:((ss_customer_sk BETWEEN DynamicValue(RS_26_store_returns_sr_customer_sk_min) AND DynamicValue(RS_26_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_26_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_26_store_returns_sr_item_sk_min) AND DynamicValue(RS_26_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_26_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_42_item_i_item_sk_min) AND DynamicValue(RS_42_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_42_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and
  in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                         TableScan [TS_0] (rows=575995635 width=88)
                                           default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_net_profit"]
+                                        <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_235]
+                                             Please refer to the previous Group By Operator [GBY_234]
+                                        <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                          BROADCAST [RS_238]
+                                             Please refer to the previous Group By Operator [GBY_237]
                                         <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_230]
-                                             Please refer to the previous Group By Operator [GBY_229]
+                                          BROADCAST [RS_247]
+                                             Please refer to the previous Group By Operator [GBY_246]
                                         <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_239]
-                                            Group By Operator [GBY_238] (rows=1 width=12)
+                                          BROADCAST [RS_256]
+                                            Group By Operator [GBY_255] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                             <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_237]
-                                                Group By Operator [GBY_236] (rows=1 width=12)
+                                              SHUFFLE [RS_254]
+                                                Group By Operator [GBY_253] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_235] (rows=1704 width=1910)
+                                                  Select Operator [SEL_252] (rows=1704 width=1910)
                                                     Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_233]
+                                                     Please refer to the previous Select Operator [SEL_250]
                                         <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_222]
-                                            Group By Operator [GBY_221] (rows=1 width=12)
+                                          BROADCAST [RS_230]
+                                            Group By Operator [GBY_229] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                             <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              PARTITION_ONLY_SHUFFLE [RS_219]
-                                                Group By Operator [GBY_217] (rows=1 width=12)
+                                              PARTITION_ONLY_SHUFFLE [RS_227]
+                                                Group By Operator [GBY_225] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_213] (rows=18262 width=1119)
+                                                  Select Operator [SEL_221] (rows=18262 width=1119)
                                                     Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_209]
+                                                     Please refer to the previous Select Operator [SEL_217]
 


[37/48] hive git commit: HIVE-18705: Improve HiveMetaStoreClient.dropDatabase (Adam Szita, reviewed by Peter Vary)

Posted by se...@apache.org.
HIVE-18705: Improve HiveMetaStoreClient.dropDatabase (Adam Szita, reviewed by Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3e023546
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3e023546
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3e023546

Branch: refs/heads/master-txnstats
Commit: 3e023546186823e3cc8ea5ce32828a52743ed1fe
Parents: 4ab1080
Author: Adam Szita <sz...@cloudera.com>
Authored: Tue Jul 17 10:27:10 2018 +0200
Committer: Adam Szita <sz...@cloudera.com>
Committed: Tue Jul 17 10:27:10 2018 +0200

----------------------------------------------------------------------
 .../positive/drop_database_table_hooks.q        |  57 ++++
 .../positive/drop_database_table_hooks.q.out    | 258 +++++++++++++++++++
 .../hadoop/hive/ql/metadata/TableIterable.java  | 104 --------
 .../hive/ql/metadata/TestTableIterable.java     |  67 -----
 .../cli/operation/GetColumnsOperation.java      |   2 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |   1 +
 .../hive/metastore/HiveMetaStoreClient.java     | 117 ++++++++-
 .../hadoop/hive/metastore/TableIterable.java    | 115 +++++++++
 .../hive/metastore/TestTableIterable.java       |  76 ++++++
 9 files changed, 616 insertions(+), 181 deletions(-)
----------------------------------------------------------------------
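
In short, this change makes HiveMetaStoreClient.dropDatabase(cascade=true) pick between two
client-side strategies based on how many tables the database holds relative to
metastore.batch.retrieve.max. The sketch below restates only that selection step; the helper
names match the diff further down, but the surrounding plumbing (catalog prefixing, dropping
materialized views first) is left out, so treat it as a simplified fragment assumed to live
inside HiveMetaStoreClient, not the committed implementation.

    // Simplified fragment, assumed to live inside HiveMetaStoreClient.
    private void dropDatabaseCascade(String catalogName, String dbName, boolean deleteData)
        throws TException {
      List<String> tableNames = getAllTables(dbName);
      int maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
      if (tableNames.size() > maxBatchSize) {
        // Too many tables to materialize at once: iterate them in batches and drop
        // each hook-backed table individually before the final drop_database call.
        dropDatabaseCascadePerTable(catalogName, dbName, tableNames, deleteData, maxBatchSize);
      } else {
        // Small database: fetch every Table object in one call, fire the pre-drop
        // hooks, then issue a single drop_database.
        dropDatabaseCascadePerDb(catalogName, dbName, tableNames, deleteData);
      }
    }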


http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/hbase-handler/src/test/queries/positive/drop_database_table_hooks.q
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/queries/positive/drop_database_table_hooks.q b/hbase-handler/src/test/queries/positive/drop_database_table_hooks.q
new file mode 100644
index 0000000..96263d0
--- /dev/null
+++ b/hbase-handler/src/test/queries/positive/drop_database_table_hooks.q
@@ -0,0 +1,57 @@
+CREATE DATABASE sometableshavehook;
+USE sometableshavehook;
+
+CREATE TABLE NOHOOK0 (name string, number int);
+CREATE TABLE NOHOOK1 (name string, number int);
+CREATE TABLE NOHOOK2 (name string, number int);
+CREATE TABLE NOHOOK3 (name string, number int);
+CREATE TABLE NOHOOK4 (name string, number int);
+
+CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+
+set metastore.batch.retrieve.max=5;
+DROP DATABASE sometableshavehook CASCADE;
+SHOW DATABASES;
+
+CREATE DATABASE sometableshavehook;
+USE sometableshavehook;
+
+CREATE TABLE NOHOOK0 (name string, number int);
+CREATE TABLE NOHOOK1 (name string, number int);
+CREATE TABLE NOHOOK2 (name string, number int);
+CREATE TABLE NOHOOK3 (name string, number int);
+CREATE TABLE NOHOOK4 (name string, number int);
+
+CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+);
+
+set metastore.batch.retrieve.max=300;
+DROP DATABASE sometableshavehook CASCADE;
+SHOW DATABASES;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/hbase-handler/src/test/results/positive/drop_database_table_hooks.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/drop_database_table_hooks.q.out b/hbase-handler/src/test/results/positive/drop_database_table_hooks.q.out
new file mode 100644
index 0000000..90713ef
--- /dev/null
+++ b/hbase-handler/src/test/results/positive/drop_database_table_hooks.q.out
@@ -0,0 +1,258 @@
+PREHOOK: query: CREATE DATABASE sometableshavehook
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:sometableshavehook
+POSTHOOK: query: CREATE DATABASE sometableshavehook
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:sometableshavehook
+PREHOOK: query: USE sometableshavehook
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:sometableshavehook
+POSTHOOK: query: USE sometableshavehook
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:sometableshavehook
+PREHOOK: query: CREATE TABLE NOHOOK0 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK0
+POSTHOOK: query: CREATE TABLE NOHOOK0 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK0
+PREHOOK: query: CREATE TABLE NOHOOK1 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK1
+POSTHOOK: query: CREATE TABLE NOHOOK1 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK1
+PREHOOK: query: CREATE TABLE NOHOOK2 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK2
+POSTHOOK: query: CREATE TABLE NOHOOK2 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK2
+PREHOOK: query: CREATE TABLE NOHOOK3 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK3
+POSTHOOK: query: CREATE TABLE NOHOOK3 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK3
+PREHOOK: query: CREATE TABLE NOHOOK4 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK4
+POSTHOOK: query: CREATE TABLE NOHOOK4 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK4
+PREHOOK: query: CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK0
+POSTHOOK: query: CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK0
+PREHOOK: query: CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK1
+POSTHOOK: query: CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK1
+PREHOOK: query: CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK2
+POSTHOOK: query: CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK2
+PREHOOK: query: DROP DATABASE sometableshavehook CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:sometableshavehook
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@hbasehook0
+PREHOOK: Output: sometableshavehook@hbasehook1
+PREHOOK: Output: sometableshavehook@hbasehook2
+PREHOOK: Output: sometableshavehook@nohook0
+PREHOOK: Output: sometableshavehook@nohook1
+PREHOOK: Output: sometableshavehook@nohook2
+PREHOOK: Output: sometableshavehook@nohook3
+PREHOOK: Output: sometableshavehook@nohook4
+POSTHOOK: query: DROP DATABASE sometableshavehook CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:sometableshavehook
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@hbasehook0
+POSTHOOK: Output: sometableshavehook@hbasehook1
+POSTHOOK: Output: sometableshavehook@hbasehook2
+POSTHOOK: Output: sometableshavehook@nohook0
+POSTHOOK: Output: sometableshavehook@nohook1
+POSTHOOK: Output: sometableshavehook@nohook2
+POSTHOOK: Output: sometableshavehook@nohook3
+POSTHOOK: Output: sometableshavehook@nohook4
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default
+PREHOOK: query: CREATE DATABASE sometableshavehook
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:sometableshavehook
+POSTHOOK: query: CREATE DATABASE sometableshavehook
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:sometableshavehook
+PREHOOK: query: USE sometableshavehook
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:sometableshavehook
+POSTHOOK: query: USE sometableshavehook
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:sometableshavehook
+PREHOOK: query: CREATE TABLE NOHOOK0 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK0
+POSTHOOK: query: CREATE TABLE NOHOOK0 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK0
+PREHOOK: query: CREATE TABLE NOHOOK1 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK1
+POSTHOOK: query: CREATE TABLE NOHOOK1 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK1
+PREHOOK: query: CREATE TABLE NOHOOK2 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK2
+POSTHOOK: query: CREATE TABLE NOHOOK2 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK2
+PREHOOK: query: CREATE TABLE NOHOOK3 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK3
+POSTHOOK: query: CREATE TABLE NOHOOK3 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK3
+PREHOOK: query: CREATE TABLE NOHOOK4 (name string, number int)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@NOHOOK4
+POSTHOOK: query: CREATE TABLE NOHOOK4 (name string, number int)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@NOHOOK4
+PREHOOK: query: CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK0
+POSTHOOK: query: CREATE TABLE HBASEHOOK0 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK0
+PREHOOK: query: CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK1
+POSTHOOK: query: CREATE TABLE HBASEHOOK1 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK1
+PREHOOK: query: CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@HBASEHOOK2
+POSTHOOK: query: CREATE TABLE HBASEHOOK2 (key int, val binary)
+    STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
+    WITH SERDEPROPERTIES (
+    "hbase.columns.mapping" = ":key,cf:val#b"
+)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@HBASEHOOK2
+PREHOOK: query: DROP DATABASE sometableshavehook CASCADE
+PREHOOK: type: DROPDATABASE
+PREHOOK: Input: database:sometableshavehook
+PREHOOK: Output: database:sometableshavehook
+PREHOOK: Output: sometableshavehook@hbasehook0
+PREHOOK: Output: sometableshavehook@hbasehook1
+PREHOOK: Output: sometableshavehook@hbasehook2
+PREHOOK: Output: sometableshavehook@nohook0
+PREHOOK: Output: sometableshavehook@nohook1
+PREHOOK: Output: sometableshavehook@nohook2
+PREHOOK: Output: sometableshavehook@nohook3
+PREHOOK: Output: sometableshavehook@nohook4
+POSTHOOK: query: DROP DATABASE sometableshavehook CASCADE
+POSTHOOK: type: DROPDATABASE
+POSTHOOK: Input: database:sometableshavehook
+POSTHOOK: Output: database:sometableshavehook
+POSTHOOK: Output: sometableshavehook@hbasehook0
+POSTHOOK: Output: sometableshavehook@hbasehook1
+POSTHOOK: Output: sometableshavehook@hbasehook2
+POSTHOOK: Output: sometableshavehook@nohook0
+POSTHOOK: Output: sometableshavehook@nohook1
+POSTHOOK: Output: sometableshavehook@nohook2
+POSTHOOK: Output: sometableshavehook@nohook3
+POSTHOOK: Output: sometableshavehook@nohook4
+PREHOOK: query: SHOW DATABASES
+PREHOOK: type: SHOWDATABASES
+POSTHOOK: query: SHOW DATABASES
+POSTHOOK: type: SHOWDATABASES
+default

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/ql/src/java/org/apache/hadoop/hive/ql/metadata/TableIterable.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/TableIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/TableIterable.java
deleted file mode 100644
index d8e771d..0000000
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/TableIterable.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.metadata;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.thrift.TException;
-
-/**
- * Use this to get Table objects for a table list. It provides an iterator to
- * on the resulting Table objects. It batches the calls to
- * IMetaStoreClient.getTableObjectsByName to avoid OOM issues in HS2 (with
- * embedded metastore) or MetaStore server (if HS2 is using remote metastore).
- *
- */
-public class TableIterable implements Iterable<Table> {
-
-  @Override
-  public Iterator<Table> iterator() {
-    return new Iterator<Table>() {
-
-      private final Iterator<String> tableNamesIter = tableNames.iterator();
-      private Iterator<org.apache.hadoop.hive.metastore.api.Table> batchIter = null;
-
-      @Override
-      public boolean hasNext() {
-        return ((batchIter != null) && batchIter.hasNext()) || tableNamesIter.hasNext();
-      }
-
-      @Override
-      public Table next() {
-        if ((batchIter == null) || !batchIter.hasNext()) {
-          getNextBatch();
-        }
-        return batchIter.next();
-      }
-
-      private void getNextBatch() {
-        // get next batch of table names in this list
-        List<String> nameBatch = new ArrayList<String>();
-        int batch_counter = 0;
-        while (batch_counter < batch_size && tableNamesIter.hasNext()) {
-          nameBatch.add(tableNamesIter.next());
-          batch_counter++;
-        }
-        // get the Table objects for this batch of table names and get iterator
-        // on it
-        try {
-          try {
-            batchIter = msc.getTableObjectsByName(dbname, nameBatch).iterator();
-          } catch (TException e) {
-            throw new HiveException(e);
-          }
-        } catch (HiveException e) {
-          throw new RuntimeException(e);
-        }
-      }
-
-      @Override
-      public void remove() {
-        throw new IllegalStateException(
-            "TableIterable is a read-only iterable and remove() is unsupported");
-      }
-    };
-  }
-
-  private final IMetaStoreClient msc;
-  private final String dbname;
-  private final List<String> tableNames;
-  private final int batch_size;
-
-  /**
-   * Primary constructor that fetches all tables in a given msc, given a Hive
-   * object,a db name and a table name list
-   */
-  public TableIterable(IMetaStoreClient msc, String dbname, List<String> tableNames, int batch_size)
-      throws TException {
-    this.msc = msc;
-    this.dbname = dbname;
-    this.tableNames = tableNames;
-    this.batch_size = batch_size;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
deleted file mode 100644
index 6637d15..0000000
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestTableIterable.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.ql.metadata;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.junit.Test;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.thrift.TException;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
-/**
- * Unit tests for TableIterable
- */
-public class TestTableIterable  {
-
-  @Test
-  public void testNumReturned() throws MetaException, InvalidOperationException, UnknownDBException, TException {
-    HiveMetaStoreClient msc = mock(HiveMetaStoreClient.class);
-
-
-    // create a mocked metastore client that returns 3 table objects every time it is called
-    // will use same size for TableIterable batch fetch size
-    List<Table> threeTables = Arrays.asList(new Table(), new Table(), new Table());
-    when(msc.getTableObjectsByName(anyString(), anyListOf(String.class))).thenReturn(threeTables);
-
-    List<String> tableNames = Arrays.asList("a", "b", "c", "d", "e", "f");
-    TableIterable tIterable = new TableIterable(msc, "dummy", tableNames, threeTables.size());
-    tIterable.iterator();
-
-    Iterator<Table> tIter = tIterable.iterator();
-    int size = 0;
-    while(tIter.hasNext()) {
-      size++;
-      tIter.next();
-    }
-    assertEquals("Number of table objects returned", size, tableNames.size());
-
-    verify(msc).getTableObjectsByName("dummy", Arrays.asList("a","b","c"));
-    verify(msc).getTableObjectsByName("dummy", Arrays.asList("d","e","f"));
-    
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
index 838dd89..6bbdce5 100644
--- a/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
+++ b/service/src/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.metadata.TableIterable;
+import org.apache.hadoop.hive.metastore.TableIterable;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index e6f7333..47f819b 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -1535,6 +1535,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
           if (tables != null && !tables.isEmpty()) {
             for (Table table : tables) {
+
               // If the table is not external and it might not be in a subdirectory of the database
               // add it's locations to the list of paths to delete
               Path tablePath = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index acdb73b..92e2805 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1039,6 +1039,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
       return;
     }
 
+    String dbNameWithCatalog = prependCatalogToDbName(catalogName, dbName, conf);
+
     if (cascade) {
       // Note that this logic may drop some of the tables of the database
       // even if the drop database fail for any reason
@@ -1048,18 +1050,115 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
         // First we delete the materialized views
         dropTable(dbName, table, deleteData, true);
       }
-      List<String> tableList = getAllTables(dbName);
-      for (String table : tableList) {
-        // Now we delete the rest of tables
-        try {
-          // Subclasses can override this step (for example, for temporary tables)
-          dropTable(dbName, table, deleteData, true);
-        } catch (UnsupportedOperationException e) {
-          // Ignore Index tables, those will be dropped with parent tables
+
+      /**
+       * When dropping a DB with cascade, client-side hooks have to be called at each table removal.
+       * If {@link org.apache.hadoop.hive.metastore.conf.MetastoreConf#ConfVars.BATCH_RETRIEVE_MAX
+       * BATCH_RETRIEVE_MAX} is less than the number of tables in the DB, we'll have to call the
+       * hooks one by one, each alongside a
+       * {@link #dropTable(String, String, boolean, boolean, EnvironmentContext) dropTable} call to
+       * ensure transactionality.
+       */
+      List<String> tableNameList = getAllTables(dbName);
+      int tableCount = tableNameList.size();
+      int maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
+      LOG.debug("Selecting dropDatabase method for " + dbName + " (" + tableCount + " tables), " +
+             ConfVars.BATCH_RETRIEVE_MAX.getVarname() + "=" + maxBatchSize);
+
+      if (tableCount > maxBatchSize) {
+        LOG.debug("Dropping database in a per table batch manner.");
+        dropDatabaseCascadePerTable(catalogName, dbName, tableNameList, deleteData, maxBatchSize);
+      } else {
+        LOG.debug("Dropping database in a per DB manner.");
+        dropDatabaseCascadePerDb(catalogName, dbName, tableNameList, deleteData);
+      }
+
+    } else {
+      client.drop_database(dbNameWithCatalog, deleteData, cascade);
+    }
+  }
+
+  /**
+   * Handles dropDatabase by invoking drop_table in HMS for each table.
+   * Useful when the table list in the DB is too large to fit in memory. It will retrieve tables in
+   * chunks and, for each table with a drop_table hook, invoke drop_table on both HMS and
+   * the hook. This is a time-consuming operation, so hookless tables are skipped and will be dropped on
+   * the server side when the client invokes drop_database.
+   * Note that this is 'less transactional' than dropDatabaseCascadePerDb since we're dropping
+   * table-level objects, so the overall outcome of this method might be a partially dropped DB.
+   * @param catName
+   * @param dbName
+   * @param tableList
+   * @param deleteData
+   * @param maxBatchSize
+   * @throws TException
+   */
+  private void dropDatabaseCascadePerTable(String catName, String dbName, List<String> tableList,
+                                           boolean deleteData, int maxBatchSize) throws TException {
+    String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+    for (Table table : new TableIterable(this, catName, dbName, tableList, maxBatchSize)) {
+      boolean success = false;
+      HiveMetaHook hook = getHook(table);
+      if (hook == null) {
+        continue;
+      }
+      try {
+        hook.preDropTable(table);
+        client.drop_table_with_environment_context(dbNameWithCatalog, table.getTableName(), deleteData, null);
+        hook.commitDropTable(table, deleteData);
+        success = true;
+      } finally {
+        if (!success) {
+          hook.rollbackDropTable(table);
+        }
+      }
+    }
+    client.drop_database(dbNameWithCatalog, deleteData, true);
+  }
+
+  /**
+   * Handles dropDatabase by invoking drop_database in HMS.
+   * Useful when the table list in the DB can fit in memory; it will retrieve all tables at once and
+   * call drop_database once. Also handles drop_table hooks.
+   * @param catName
+   * @param dbName
+   * @param tableList
+   * @param deleteData
+   * @throws TException
+   */
+  private void dropDatabaseCascadePerDb(String catName, String dbName, List<String> tableList,
+                                        boolean deleteData) throws TException {
+    String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+    List<Table> tables = getTableObjectsByName(catName, dbName, tableList);
+    boolean success = false;
+    try {
+      for (Table table : tables) {
+        HiveMetaHook hook = getHook(table);
+        if (hook == null) {
+          continue;
+        }
+        hook.preDropTable(table);
+      }
+      client.drop_database(dbNameWithCatalog, deleteData, true);
+      for (Table table : tables) {
+        HiveMetaHook hook = getHook(table);
+        if (hook == null) {
+          continue;
+        }
+        hook.commitDropTable(table, deleteData);
+      }
+      success = true;
+    } finally {
+      if (!success) {
+        for (Table table : tables) {
+          HiveMetaHook hook = getHook(table);
+          if (hook == null) {
+            continue;
+          }
+          hook.rollbackDropTable(table);
         }
       }
     }
-    client.drop_database(prependCatalogToDbName(catalogName, dbName, conf), deleteData, cascade);
   }
 
   @Override

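A minimal caller-side sketch of exercising the new cascade path (illustrative only; the database
name and method body below are assumptions, not part of the commit). Whether the per-table or the
per-DB variant runs is decided inside dropDatabase by comparing the table count against
BATCH_RETRIEVE_MAX.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.thrift.TException;

    // Hypothetical sketch: drop a database with cascade through the metastore client.
    void dropStagingDbCascade() throws TException {
      Configuration conf = MetastoreConf.newMetastoreConf();
      HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
      try {
        // deleteData=true, ignoreUnknownDb=false, cascade=true; the per-table vs. per-DB
        // choice above is made from the table count and BATCH_RETRIEVE_MAX.
        msc.dropDatabase("sales_staging", true, false, true);
      } finally {
        msc.close();
      }
    }
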
http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
new file mode 100644
index 0000000..1a17fe3
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/TableIterable.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.thrift.TException;
+
+/**
+ * Use this to get Table objects for a table list. It provides an iterator
+ * over the resulting Table objects. It batches the calls to
+ * IMetaStoreClient.getTableObjectsByName to avoid OOM issues in HS2 (with
+ * embedded metastore) or MetaStore server (if HS2 is using remote metastore).
+ *
+ */
+public class TableIterable implements Iterable<Table> {
+
+  @Override
+  public Iterator<Table> iterator() {
+    return new Iterator<Table>() {
+
+      private final Iterator<String> tableNamesIter = tableNames.iterator();
+      private Iterator<org.apache.hadoop.hive.metastore.api.Table> batchIter = null;
+
+      @Override
+      public boolean hasNext() {
+        return ((batchIter != null) && batchIter.hasNext()) || tableNamesIter.hasNext();
+      }
+
+      @Override
+      public Table next() {
+        if ((batchIter == null) || !batchIter.hasNext()) {
+          getNextBatch();
+        }
+        return batchIter.next();
+      }
+
+      private void getNextBatch() {
+        // get next batch of table names in this list
+        List<String> nameBatch = new ArrayList<String>();
+        int batchCounter = 0;
+        while (batchCounter < batchSize && tableNamesIter.hasNext()) {
+          nameBatch.add(tableNamesIter.next());
+          batchCounter++;
+        }
+        // get the Table objects for this batch of table names and get iterator
+        // on it
+
+        try {
+          if (catName != null) {
+            batchIter = msc.getTableObjectsByName(catName, dbname, nameBatch).iterator();
+          } else {
+            batchIter = msc.getTableObjectsByName(dbname, nameBatch).iterator();
+          }
+        } catch (TException e) {
+          throw new RuntimeException(e);
+        }
+
+      }
+
+      @Override
+      public void remove() {
+        throw new IllegalStateException(
+            "TableIterable is a read-only iterable and remove() is unsupported");
+      }
+    };
+  }
+
+  private final IMetaStoreClient msc;
+  private final String dbname;
+  private final List<String> tableNames;
+  private final int batchSize;
+  private final String catName;
+
+  /**
+   * Primary constructor that fetches the Table objects through the given metastore
+   * client, given a db name and a table name list.
+   */
+  public TableIterable(IMetaStoreClient msc, String dbname, List<String> tableNames, int batchSize)
+      throws TException {
+    this.msc = msc;
+    this.catName = null;
+    this.dbname = dbname;
+    this.tableNames = tableNames;
+    this.batchSize = batchSize;
+  }
+
+  public TableIterable(IMetaStoreClient msc, String catName, String dbname, List<String>
+          tableNames, int batchSize) throws TException {
+    this.msc = msc;
+    this.catName = catName;
+    this.dbname = dbname;
+    this.tableNames = tableNames;
+    this.batchSize = batchSize;
+  }
+}

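A minimal usage sketch of the new TableIterable (illustrative only; "msc" is assumed to be a
connected IMetaStoreClient, and the db name and batch size are made up). The point is that Table
objects are fetched in bounded batches rather than all at once.

    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.TableIterable;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    // Hypothetical sketch: iterate all tables of a DB, fetching 100 Table objects per metastore call.
    void printTableLocations(IMetaStoreClient msc) throws TException {
      List<String> names = msc.getAllTables("default");
      for (Table t : new TableIterable(msc, "default", names, 100)) {
        System.out.println(t.getTableName() + " -> " + t.getSd().getLocation());
      }
    }
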
http://git-wip-us.apache.org/repos/asf/hive/blob/3e023546/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
new file mode 100644
index 0000000..f0d4427
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+import org.apache.thrift.TException;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.anyListOf;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit tests for TableIterable.
+ */
+@Category(MetastoreUnitTest.class)
+public class TestTableIterable  {
+
+  @Test
+  public void testNumReturned() throws MetaException, InvalidOperationException,
+          UnknownDBException, TException {
+    HiveMetaStoreClient msc = mock(HiveMetaStoreClient.class);
+
+
+    // create a mocked metastore client that returns 3 table objects every time it is called
+    // will use same size for TableIterable batch fetch size
+    List<Table> threeTables = Arrays.asList(new Table(), new Table(), new Table());
+    when(msc.getTableObjectsByName(anyString(), anyListOf(String.class))).thenReturn(threeTables);
+
+    List<String> tableNames = Arrays.asList("a", "b", "c", "d", "e", "f");
+    TableIterable tIterable = new TableIterable(msc, "dummy", tableNames,
+            threeTables.size());
+    tIterable.iterator();
+
+    Iterator<Table> tIter = tIterable.iterator();
+    int size = 0;
+    while(tIter.hasNext()) {
+      size++;
+      tIter.next();
+    }
+    assertEquals("Number of table objects returned", size, tableNames.size());
+
+    verify(msc).getTableObjectsByName("dummy", Arrays.asList("a", "b", "c"));
+    verify(msc).getTableObjectsByName("dummy", Arrays.asList("d", "e", "f"));
+
+  }
+}

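For reference, the batching the test verifies can be pictured with a small sketch (illustrative
only): six names with a batch size of three yield exactly two getTableObjectsByName calls.

    // Hypothetical illustration of how the names are split into batches of 3.
    List<String> names = Arrays.asList("a", "b", "c", "d", "e", "f");
    for (int i = 0; i < names.size(); i += 3) {
      // Prints [a, b, c] then [d, e, f].
      System.out.println(names.subList(i, Math.min(i + 3, names.size())));
    }
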

[46/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
index 6d3ac0c,f0c308d..4467479
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
@@@ -755,14 -755,14 +755,14 @@@ import org.slf4j.LoggerFactory
            case 2: // POOLS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list880 = iprot.readListBegin();
-                 struct.pools = new ArrayList<WMPool>(_list880.size);
-                 WMPool _elem881;
-                 for (int _i882 = 0; _i882 < _list880.size; ++_i882)
 -                org.apache.thrift.protocol.TList _list864 = iprot.readListBegin();
 -                struct.pools = new ArrayList<WMPool>(_list864.size);
 -                WMPool _elem865;
 -                for (int _i866 = 0; _i866 < _list864.size; ++_i866)
++                org.apache.thrift.protocol.TList _list872 = iprot.readListBegin();
++                struct.pools = new ArrayList<WMPool>(_list872.size);
++                WMPool _elem873;
++                for (int _i874 = 0; _i874 < _list872.size; ++_i874)
                  {
-                   _elem881 = new WMPool();
-                   _elem881.read(iprot);
-                   struct.pools.add(_elem881);
 -                  _elem865 = new WMPool();
 -                  _elem865.read(iprot);
 -                  struct.pools.add(_elem865);
++                  _elem873 = new WMPool();
++                  _elem873.read(iprot);
++                  struct.pools.add(_elem873);
                  }
                  iprot.readListEnd();
                }
@@@ -774,14 -774,14 +774,14 @@@
            case 3: // MAPPINGS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list883 = iprot.readListBegin();
-                 struct.mappings = new ArrayList<WMMapping>(_list883.size);
-                 WMMapping _elem884;
-                 for (int _i885 = 0; _i885 < _list883.size; ++_i885)
 -                org.apache.thrift.protocol.TList _list867 = iprot.readListBegin();
 -                struct.mappings = new ArrayList<WMMapping>(_list867.size);
 -                WMMapping _elem868;
 -                for (int _i869 = 0; _i869 < _list867.size; ++_i869)
++                org.apache.thrift.protocol.TList _list875 = iprot.readListBegin();
++                struct.mappings = new ArrayList<WMMapping>(_list875.size);
++                WMMapping _elem876;
++                for (int _i877 = 0; _i877 < _list875.size; ++_i877)
                  {
-                   _elem884 = new WMMapping();
-                   _elem884.read(iprot);
-                   struct.mappings.add(_elem884);
 -                  _elem868 = new WMMapping();
 -                  _elem868.read(iprot);
 -                  struct.mappings.add(_elem868);
++                  _elem876 = new WMMapping();
++                  _elem876.read(iprot);
++                  struct.mappings.add(_elem876);
                  }
                  iprot.readListEnd();
                }
@@@ -793,14 -793,14 +793,14 @@@
            case 4: // TRIGGERS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list886 = iprot.readListBegin();
-                 struct.triggers = new ArrayList<WMTrigger>(_list886.size);
-                 WMTrigger _elem887;
-                 for (int _i888 = 0; _i888 < _list886.size; ++_i888)
 -                org.apache.thrift.protocol.TList _list870 = iprot.readListBegin();
 -                struct.triggers = new ArrayList<WMTrigger>(_list870.size);
 -                WMTrigger _elem871;
 -                for (int _i872 = 0; _i872 < _list870.size; ++_i872)
++                org.apache.thrift.protocol.TList _list878 = iprot.readListBegin();
++                struct.triggers = new ArrayList<WMTrigger>(_list878.size);
++                WMTrigger _elem879;
++                for (int _i880 = 0; _i880 < _list878.size; ++_i880)
                  {
-                   _elem887 = new WMTrigger();
-                   _elem887.read(iprot);
-                   struct.triggers.add(_elem887);
 -                  _elem871 = new WMTrigger();
 -                  _elem871.read(iprot);
 -                  struct.triggers.add(_elem871);
++                  _elem879 = new WMTrigger();
++                  _elem879.read(iprot);
++                  struct.triggers.add(_elem879);
                  }
                  iprot.readListEnd();
                }
@@@ -812,14 -812,14 +812,14 @@@
            case 5: // POOL_TRIGGERS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list889 = iprot.readListBegin();
-                 struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list889.size);
-                 WMPoolTrigger _elem890;
-                 for (int _i891 = 0; _i891 < _list889.size; ++_i891)
 -                org.apache.thrift.protocol.TList _list873 = iprot.readListBegin();
 -                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list873.size);
 -                WMPoolTrigger _elem874;
 -                for (int _i875 = 0; _i875 < _list873.size; ++_i875)
++                org.apache.thrift.protocol.TList _list881 = iprot.readListBegin();
++                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list881.size);
++                WMPoolTrigger _elem882;
++                for (int _i883 = 0; _i883 < _list881.size; ++_i883)
                  {
-                   _elem890 = new WMPoolTrigger();
-                   _elem890.read(iprot);
-                   struct.poolTriggers.add(_elem890);
 -                  _elem874 = new WMPoolTrigger();
 -                  _elem874.read(iprot);
 -                  struct.poolTriggers.add(_elem874);
++                  _elem882 = new WMPoolTrigger();
++                  _elem882.read(iprot);
++                  struct.poolTriggers.add(_elem882);
                  }
                  iprot.readListEnd();
                }
@@@ -850,9 -850,9 +850,9 @@@
          oprot.writeFieldBegin(POOLS_FIELD_DESC);
          {
            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size()));
-           for (WMPool _iter892 : struct.pools)
 -          for (WMPool _iter876 : struct.pools)
++          for (WMPool _iter884 : struct.pools)
            {
-             _iter892.write(oprot);
 -            _iter876.write(oprot);
++            _iter884.write(oprot);
            }
            oprot.writeListEnd();
          }
@@@ -863,9 -863,9 +863,9 @@@
            oprot.writeFieldBegin(MAPPINGS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size()));
-             for (WMMapping _iter893 : struct.mappings)
 -            for (WMMapping _iter877 : struct.mappings)
++            for (WMMapping _iter885 : struct.mappings)
              {
-               _iter893.write(oprot);
 -              _iter877.write(oprot);
++              _iter885.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -877,9 -877,9 +877,9 @@@
            oprot.writeFieldBegin(TRIGGERS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size()));
-             for (WMTrigger _iter894 : struct.triggers)
 -            for (WMTrigger _iter878 : struct.triggers)
++            for (WMTrigger _iter886 : struct.triggers)
              {
-               _iter894.write(oprot);
 -              _iter878.write(oprot);
++              _iter886.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -891,9 -891,9 +891,9 @@@
            oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size()));
-             for (WMPoolTrigger _iter895 : struct.poolTriggers)
 -            for (WMPoolTrigger _iter879 : struct.poolTriggers)
++            for (WMPoolTrigger _iter887 : struct.poolTriggers)
              {
-               _iter895.write(oprot);
 -              _iter879.write(oprot);
++              _iter887.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -920,9 -920,9 +920,9 @@@
        struct.plan.write(oprot);
        {
          oprot.writeI32(struct.pools.size());
-         for (WMPool _iter896 : struct.pools)
 -        for (WMPool _iter880 : struct.pools)
++        for (WMPool _iter888 : struct.pools)
          {
-           _iter896.write(oprot);
 -          _iter880.write(oprot);
++          _iter888.write(oprot);
          }
        }
        BitSet optionals = new BitSet();
@@@ -939,27 -939,27 +939,27 @@@
        if (struct.isSetMappings()) {
          {
            oprot.writeI32(struct.mappings.size());
-           for (WMMapping _iter897 : struct.mappings)
 -          for (WMMapping _iter881 : struct.mappings)
++          for (WMMapping _iter889 : struct.mappings)
            {
-             _iter897.write(oprot);
 -            _iter881.write(oprot);
++            _iter889.write(oprot);
            }
          }
        }
        if (struct.isSetTriggers()) {
          {
            oprot.writeI32(struct.triggers.size());
-           for (WMTrigger _iter898 : struct.triggers)
 -          for (WMTrigger _iter882 : struct.triggers)
++          for (WMTrigger _iter890 : struct.triggers)
            {
-             _iter898.write(oprot);
 -            _iter882.write(oprot);
++            _iter890.write(oprot);
            }
          }
        }
        if (struct.isSetPoolTriggers()) {
          {
            oprot.writeI32(struct.poolTriggers.size());
-           for (WMPoolTrigger _iter899 : struct.poolTriggers)
 -          for (WMPoolTrigger _iter883 : struct.poolTriggers)
++          for (WMPoolTrigger _iter891 : struct.poolTriggers)
            {
-             _iter899.write(oprot);
 -            _iter883.write(oprot);
++            _iter891.write(oprot);
            }
          }
        }
@@@ -972,56 -972,56 +972,56 @@@
        struct.plan.read(iprot);
        struct.setPlanIsSet(true);
        {
-         org.apache.thrift.protocol.TList _list900 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.pools = new ArrayList<WMPool>(_list900.size);
-         WMPool _elem901;
-         for (int _i902 = 0; _i902 < _list900.size; ++_i902)
 -        org.apache.thrift.protocol.TList _list884 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -        struct.pools = new ArrayList<WMPool>(_list884.size);
 -        WMPool _elem885;
 -        for (int _i886 = 0; _i886 < _list884.size; ++_i886)
++        org.apache.thrift.protocol.TList _list892 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++        struct.pools = new ArrayList<WMPool>(_list892.size);
++        WMPool _elem893;
++        for (int _i894 = 0; _i894 < _list892.size; ++_i894)
          {
-           _elem901 = new WMPool();
-           _elem901.read(iprot);
-           struct.pools.add(_elem901);
 -          _elem885 = new WMPool();
 -          _elem885.read(iprot);
 -          struct.pools.add(_elem885);
++          _elem893 = new WMPool();
++          _elem893.read(iprot);
++          struct.pools.add(_elem893);
          }
        }
        struct.setPoolsIsSet(true);
        BitSet incoming = iprot.readBitSet(3);
        if (incoming.get(0)) {
          {
-           org.apache.thrift.protocol.TList _list903 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.mappings = new ArrayList<WMMapping>(_list903.size);
-           WMMapping _elem904;
-           for (int _i905 = 0; _i905 < _list903.size; ++_i905)
 -          org.apache.thrift.protocol.TList _list887 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.mappings = new ArrayList<WMMapping>(_list887.size);
 -          WMMapping _elem888;
 -          for (int _i889 = 0; _i889 < _list887.size; ++_i889)
++          org.apache.thrift.protocol.TList _list895 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.mappings = new ArrayList<WMMapping>(_list895.size);
++          WMMapping _elem896;
++          for (int _i897 = 0; _i897 < _list895.size; ++_i897)
            {
-             _elem904 = new WMMapping();
-             _elem904.read(iprot);
-             struct.mappings.add(_elem904);
 -            _elem888 = new WMMapping();
 -            _elem888.read(iprot);
 -            struct.mappings.add(_elem888);
++            _elem896 = new WMMapping();
++            _elem896.read(iprot);
++            struct.mappings.add(_elem896);
            }
          }
          struct.setMappingsIsSet(true);
        }
        if (incoming.get(1)) {
          {
-           org.apache.thrift.protocol.TList _list906 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.triggers = new ArrayList<WMTrigger>(_list906.size);
-           WMTrigger _elem907;
-           for (int _i908 = 0; _i908 < _list906.size; ++_i908)
 -          org.apache.thrift.protocol.TList _list890 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.triggers = new ArrayList<WMTrigger>(_list890.size);
 -          WMTrigger _elem891;
 -          for (int _i892 = 0; _i892 < _list890.size; ++_i892)
++          org.apache.thrift.protocol.TList _list898 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.triggers = new ArrayList<WMTrigger>(_list898.size);
++          WMTrigger _elem899;
++          for (int _i900 = 0; _i900 < _list898.size; ++_i900)
            {
-             _elem907 = new WMTrigger();
-             _elem907.read(iprot);
-             struct.triggers.add(_elem907);
 -            _elem891 = new WMTrigger();
 -            _elem891.read(iprot);
 -            struct.triggers.add(_elem891);
++            _elem899 = new WMTrigger();
++            _elem899.read(iprot);
++            struct.triggers.add(_elem899);
            }
          }
          struct.setTriggersIsSet(true);
        }
        if (incoming.get(2)) {
          {
-           org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list909.size);
-           WMPoolTrigger _elem910;
-           for (int _i911 = 0; _i911 < _list909.size; ++_i911)
 -          org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list893.size);
 -          WMPoolTrigger _elem894;
 -          for (int _i895 = 0; _i895 < _list893.size; ++_i895)
++          org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list901.size);
++          WMPoolTrigger _elem902;
++          for (int _i903 = 0; _i903 < _list901.size; ++_i903)
            {
-             _elem910 = new WMPoolTrigger();
-             _elem910.read(iprot);
-             struct.poolTriggers.add(_elem910);
 -            _elem894 = new WMPoolTrigger();
 -            _elem894.read(iprot);
 -            struct.poolTriggers.add(_elem894);
++            _elem902 = new WMPoolTrigger();
++            _elem902.read(iprot);
++            struct.poolTriggers.add(_elem902);
            }
          }
          struct.setPoolTriggersIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
index 2c427b9,6eed84b..c6cb845
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
@@@ -346,14 -346,14 +346,14 @@@ import org.slf4j.LoggerFactory
            case 1: // RESOURCE_PLANS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list912 = iprot.readListBegin();
-                 struct.resourcePlans = new ArrayList<WMResourcePlan>(_list912.size);
-                 WMResourcePlan _elem913;
-                 for (int _i914 = 0; _i914 < _list912.size; ++_i914)
 -                org.apache.thrift.protocol.TList _list896 = iprot.readListBegin();
 -                struct.resourcePlans = new ArrayList<WMResourcePlan>(_list896.size);
 -                WMResourcePlan _elem897;
 -                for (int _i898 = 0; _i898 < _list896.size; ++_i898)
++                org.apache.thrift.protocol.TList _list904 = iprot.readListBegin();
++                struct.resourcePlans = new ArrayList<WMResourcePlan>(_list904.size);
++                WMResourcePlan _elem905;
++                for (int _i906 = 0; _i906 < _list904.size; ++_i906)
                  {
-                   _elem913 = new WMResourcePlan();
-                   _elem913.read(iprot);
-                   struct.resourcePlans.add(_elem913);
 -                  _elem897 = new WMResourcePlan();
 -                  _elem897.read(iprot);
 -                  struct.resourcePlans.add(_elem897);
++                  _elem905 = new WMResourcePlan();
++                  _elem905.read(iprot);
++                  struct.resourcePlans.add(_elem905);
                  }
                  iprot.readListEnd();
                }
@@@ -380,9 -380,9 +380,9 @@@
            oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size()));
-             for (WMResourcePlan _iter915 : struct.resourcePlans)
 -            for (WMResourcePlan _iter899 : struct.resourcePlans)
++            for (WMResourcePlan _iter907 : struct.resourcePlans)
              {
-               _iter915.write(oprot);
 -              _iter899.write(oprot);
++              _iter907.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -414,9 -414,9 +414,9 @@@
        if (struct.isSetResourcePlans()) {
          {
            oprot.writeI32(struct.resourcePlans.size());
-           for (WMResourcePlan _iter916 : struct.resourcePlans)
 -          for (WMResourcePlan _iter900 : struct.resourcePlans)
++          for (WMResourcePlan _iter908 : struct.resourcePlans)
            {
-             _iter916.write(oprot);
 -            _iter900.write(oprot);
++            _iter908.write(oprot);
            }
          }
        }
@@@ -428,14 -428,14 +428,14 @@@
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
-           org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.resourcePlans = new ArrayList<WMResourcePlan>(_list917.size);
-           WMResourcePlan _elem918;
-           for (int _i919 = 0; _i919 < _list917.size; ++_i919)
 -          org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.resourcePlans = new ArrayList<WMResourcePlan>(_list901.size);
 -          WMResourcePlan _elem902;
 -          for (int _i903 = 0; _i903 < _list901.size; ++_i903)
++          org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.resourcePlans = new ArrayList<WMResourcePlan>(_list909.size);
++          WMResourcePlan _elem910;
++          for (int _i911 = 0; _i911 < _list909.size; ++_i911)
            {
-             _elem918 = new WMResourcePlan();
-             _elem918.read(iprot);
-             struct.resourcePlans.add(_elem918);
 -            _elem902 = new WMResourcePlan();
 -            _elem902.read(iprot);
 -            struct.resourcePlans.add(_elem902);
++            _elem910 = new WMResourcePlan();
++            _elem910.read(iprot);
++            struct.resourcePlans.add(_elem910);
            }
          }
          struct.setResourcePlansIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
index 57615c0,53ea5d5..9eed335
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
@@@ -346,14 -346,14 +346,14 @@@ import org.slf4j.LoggerFactory
            case 1: // TRIGGERS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
-                 struct.triggers = new ArrayList<WMTrigger>(_list936.size);
-                 WMTrigger _elem937;
-                 for (int _i938 = 0; _i938 < _list936.size; ++_i938)
 -                org.apache.thrift.protocol.TList _list920 = iprot.readListBegin();
 -                struct.triggers = new ArrayList<WMTrigger>(_list920.size);
 -                WMTrigger _elem921;
 -                for (int _i922 = 0; _i922 < _list920.size; ++_i922)
++                org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
++                struct.triggers = new ArrayList<WMTrigger>(_list928.size);
++                WMTrigger _elem929;
++                for (int _i930 = 0; _i930 < _list928.size; ++_i930)
                  {
-                   _elem937 = new WMTrigger();
-                   _elem937.read(iprot);
-                   struct.triggers.add(_elem937);
 -                  _elem921 = new WMTrigger();
 -                  _elem921.read(iprot);
 -                  struct.triggers.add(_elem921);
++                  _elem929 = new WMTrigger();
++                  _elem929.read(iprot);
++                  struct.triggers.add(_elem929);
                  }
                  iprot.readListEnd();
                }
@@@ -380,9 -380,9 +380,9 @@@
            oprot.writeFieldBegin(TRIGGERS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size()));
-             for (WMTrigger _iter939 : struct.triggers)
 -            for (WMTrigger _iter923 : struct.triggers)
++            for (WMTrigger _iter931 : struct.triggers)
              {
-               _iter939.write(oprot);
 -              _iter923.write(oprot);
++              _iter931.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -414,9 -414,9 +414,9 @@@
        if (struct.isSetTriggers()) {
          {
            oprot.writeI32(struct.triggers.size());
-           for (WMTrigger _iter940 : struct.triggers)
 -          for (WMTrigger _iter924 : struct.triggers)
++          for (WMTrigger _iter932 : struct.triggers)
            {
-             _iter940.write(oprot);
 -            _iter924.write(oprot);
++            _iter932.write(oprot);
            }
          }
        }
@@@ -428,14 -428,14 +428,14 @@@
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
          {
-           org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-           struct.triggers = new ArrayList<WMTrigger>(_list941.size);
-           WMTrigger _elem942;
-           for (int _i943 = 0; _i943 < _list941.size; ++_i943)
 -          org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -          struct.triggers = new ArrayList<WMTrigger>(_list925.size);
 -          WMTrigger _elem926;
 -          for (int _i927 = 0; _i927 < _list925.size; ++_i927)
++          org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++          struct.triggers = new ArrayList<WMTrigger>(_list933.size);
++          WMTrigger _elem934;
++          for (int _i935 = 0; _i935 < _list933.size; ++_i935)
            {
-             _elem942 = new WMTrigger();
-             _elem942.read(iprot);
-             struct.triggers.add(_elem942);
 -            _elem926 = new WMTrigger();
 -            _elem926.read(iprot);
 -            struct.triggers.add(_elem926);
++            _elem934 = new WMTrigger();
++            _elem934.read(iprot);
++            struct.triggers.add(_elem934);
            }
          }
          struct.setTriggersIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
index 3f7f953,0dd8a5e..ee9251c
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
@@@ -441,13 -441,13 +441,13 @@@ import org.slf4j.LoggerFactory
            case 1: // ERRORS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list920 = iprot.readListBegin();
-                 struct.errors = new ArrayList<String>(_list920.size);
-                 String _elem921;
-                 for (int _i922 = 0; _i922 < _list920.size; ++_i922)
 -                org.apache.thrift.protocol.TList _list904 = iprot.readListBegin();
 -                struct.errors = new ArrayList<String>(_list904.size);
 -                String _elem905;
 -                for (int _i906 = 0; _i906 < _list904.size; ++_i906)
++                org.apache.thrift.protocol.TList _list912 = iprot.readListBegin();
++                struct.errors = new ArrayList<String>(_list912.size);
++                String _elem913;
++                for (int _i914 = 0; _i914 < _list912.size; ++_i914)
                  {
-                   _elem921 = iprot.readString();
-                   struct.errors.add(_elem921);
 -                  _elem905 = iprot.readString();
 -                  struct.errors.add(_elem905);
++                  _elem913 = iprot.readString();
++                  struct.errors.add(_elem913);
                  }
                  iprot.readListEnd();
                }
@@@ -459,13 -459,13 +459,13 @@@
            case 2: // WARNINGS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list923 = iprot.readListBegin();
-                 struct.warnings = new ArrayList<String>(_list923.size);
-                 String _elem924;
-                 for (int _i925 = 0; _i925 < _list923.size; ++_i925)
 -                org.apache.thrift.protocol.TList _list907 = iprot.readListBegin();
 -                struct.warnings = new ArrayList<String>(_list907.size);
 -                String _elem908;
 -                for (int _i909 = 0; _i909 < _list907.size; ++_i909)
++                org.apache.thrift.protocol.TList _list915 = iprot.readListBegin();
++                struct.warnings = new ArrayList<String>(_list915.size);
++                String _elem916;
++                for (int _i917 = 0; _i917 < _list915.size; ++_i917)
                  {
-                   _elem924 = iprot.readString();
-                   struct.warnings.add(_elem924);
 -                  _elem908 = iprot.readString();
 -                  struct.warnings.add(_elem908);
++                  _elem916 = iprot.readString();
++                  struct.warnings.add(_elem916);
                  }
                  iprot.readListEnd();
                }
@@@ -492,9 -492,9 +492,9 @@@
            oprot.writeFieldBegin(ERRORS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size()));
-             for (String _iter926 : struct.errors)
 -            for (String _iter910 : struct.errors)
++            for (String _iter918 : struct.errors)
              {
-               oprot.writeString(_iter926);
 -              oprot.writeString(_iter910);
++              oprot.writeString(_iter918);
              }
              oprot.writeListEnd();
            }
@@@ -506,9 -506,9 +506,9 @@@
            oprot.writeFieldBegin(WARNINGS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size()));
-             for (String _iter927 : struct.warnings)
 -            for (String _iter911 : struct.warnings)
++            for (String _iter919 : struct.warnings)
              {
-               oprot.writeString(_iter927);
 -              oprot.writeString(_iter911);
++              oprot.writeString(_iter919);
              }
              oprot.writeListEnd();
            }
@@@ -543,18 -543,18 +543,18 @@@
        if (struct.isSetErrors()) {
          {
            oprot.writeI32(struct.errors.size());
-           for (String _iter928 : struct.errors)
 -          for (String _iter912 : struct.errors)
++          for (String _iter920 : struct.errors)
            {
-             oprot.writeString(_iter928);
 -            oprot.writeString(_iter912);
++            oprot.writeString(_iter920);
            }
          }
        }
        if (struct.isSetWarnings()) {
          {
            oprot.writeI32(struct.warnings.size());
-           for (String _iter929 : struct.warnings)
 -          for (String _iter913 : struct.warnings)
++          for (String _iter921 : struct.warnings)
            {
-             oprot.writeString(_iter929);
 -            oprot.writeString(_iter913);
++            oprot.writeString(_iter921);
            }
          }
        }
@@@ -566,26 -566,26 +566,26 @@@
        BitSet incoming = iprot.readBitSet(2);
        if (incoming.get(0)) {
          {
-           org.apache.thrift.protocol.TList _list930 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-           struct.errors = new ArrayList<String>(_list930.size);
-           String _elem931;
-           for (int _i932 = 0; _i932 < _list930.size; ++_i932)
 -          org.apache.thrift.protocol.TList _list914 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -          struct.errors = new ArrayList<String>(_list914.size);
 -          String _elem915;
 -          for (int _i916 = 0; _i916 < _list914.size; ++_i916)
++          org.apache.thrift.protocol.TList _list922 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++          struct.errors = new ArrayList<String>(_list922.size);
++          String _elem923;
++          for (int _i924 = 0; _i924 < _list922.size; ++_i924)
            {
-             _elem931 = iprot.readString();
-             struct.errors.add(_elem931);
 -            _elem915 = iprot.readString();
 -            struct.errors.add(_elem915);
++            _elem923 = iprot.readString();
++            struct.errors.add(_elem923);
            }
          }
          struct.setErrorsIsSet(true);
        }
        if (incoming.get(1)) {
          {
-           org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-           struct.warnings = new ArrayList<String>(_list933.size);
-           String _elem934;
-           for (int _i935 = 0; _i935 < _list933.size; ++_i935)
 -          org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -          struct.warnings = new ArrayList<String>(_list917.size);
 -          String _elem918;
 -          for (int _i919 = 0; _i919 < _list917.size; ++_i919)
++          org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++          struct.warnings = new ArrayList<String>(_list925.size);
++          String _elem926;
++          for (int _i927 = 0; _i927 < _list925.size; ++_i927)
            {
-             _elem934 = iprot.readString();
-             struct.warnings.add(_elem934);
 -            _elem918 = iprot.readString();
 -            struct.warnings.add(_elem918);
++            _elem926 = iprot.readString();
++            struct.warnings.add(_elem926);
            }
          }
          struct.setWarningsIsSet(true);


[30/48] hive git commit: HIVE-20174: Vectorization: Fix NULL / Wrong Results issues in GROUP BY Aggregation Functions (Matt McCline, reviewed by Teddy Choi)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
index e54ccaa..9020016 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorIfStatement.java
@@ -199,10 +199,12 @@ public class TestVectorIfStatement {
       boolean isNullScalar1, boolean isNullScalar2)
           throws Exception {
 
+    /*
     System.out.println("*DEBUG* typeName " + typeName +
         " columnScalarMode " + columnScalarMode +
         " isNullScalar1 " + isNullScalar1 +
         " isNullScalar2 " + isNullScalar2);
+    */
 
     TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(typeName);
 
@@ -457,11 +459,13 @@ public class TestVectorIfStatement {
     resultVectorExtractRow.init(new TypeInfo[] { typeInfo }, new int[] { columns.size() });
     Object[] scrqtchRow = new Object[1];
 
+    /*
     System.out.println(
         "*DEBUG* typeInfo " + typeInfo.toString() +
         " ifStmtTestMode " + ifStmtTestMode +
         " columnScalarMode " + columnScalarMode +
         " vectorExpression " + vectorExpression.getClass().getSimpleName());
+    */
 
     batchSource.resetBatchIteration();
     int rowIndex = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorNegative.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorNegative.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorNegative.java
index ce20f28..d43249e 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorNegative.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorNegative.java
@@ -330,10 +330,12 @@ public class TestVectorNegative {
       ObjectInspector rowInspector,
       TypeInfo outputTypeInfo, Object[] resultObjects) throws Exception {
 
+    /*
     System.out.println(
         "*DEBUG* typeInfo " + typeInfo.toString() +
         " negativeTestMode ROW_MODE" +
         " exprDesc " + exprDesc.toString());
+    */
 
     HiveConf hiveConf = new HiveConf();
     ExprNodeEvaluator evaluator =
@@ -425,10 +427,13 @@ public class TestVectorNegative {
         new TypeInfo[] { outputTypeInfo }, new int[] { vectorExpression.getOutputColumnNum() });
     Object[] scrqtchRow = new Object[1];
 
+    /*
     System.out.println(
         "*DEBUG* typeInfo " + typeInfo.toString() +
         " negativeTestMode " + negativeTestMode +
         " vectorExpression " + vectorExpression.toString());
+    */
+
     batchSource.resetBatchIteration();
     int rowIndex = 0;
     while (true) {

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringConcat.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringConcat.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringConcat.java
index a87a8b4..f3050c2 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringConcat.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringConcat.java
@@ -305,12 +305,14 @@ public class TestVectorStringConcat {
       ObjectInspector rowInspector,
       GenericUDF genericUdf, Object[] resultObjects) throws Exception {
 
+    /*
     System.out.println(
         "*DEBUG* stringTypeInfo " + stringTypeInfo.toString() +
         " integerTypeInfo " + integerTypeInfo +
         " stringConcatTestMode ROW_MODE" +
         " columnScalarMode " + columnScalarMode +
         " genericUdf " + genericUdf.toString());
+    */
 
     ExprNodeGenericFuncDesc exprDesc =
         new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, genericUdf, children);
@@ -405,12 +407,14 @@ public class TestVectorStringConcat {
         new TypeInfo[] { outputTypeInfo }, new int[] { columns.size() });
     Object[] scrqtchRow = new Object[1];
 
+    /*
     System.out.println(
         "*DEBUG* stringTypeInfo1 " + stringTypeInfo1.toString() +
         " stringTypeInfo2 " + stringTypeInfo2.toString() +
         " stringConcatTestMode " + stringConcatTestMode +
         " columnScalarMode " + columnScalarMode +
         " vectorExpression " + vectorExpression.toString());
+    */
 
     batchSource.resetBatchIteration();
     int rowIndex = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringUnary.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringUnary.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringUnary.java
index 90f7992..8df5595 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringUnary.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorStringUnary.java
@@ -347,11 +347,13 @@ public class TestVectorStringUnary {
     resultVectorExtractRow.init(new TypeInfo[] { targetTypeInfo }, new int[] { columns.size() });
     Object[] scrqtchRow = new Object[1];
 
+    /*
     System.out.println(
         "*DEBUG* typeInfo " + typeInfo.toString() +
         " targetTypeInfo " + targetTypeInfo.toString() +
         " stringUnaryTestMode " + stringUnaryTestMode +
         " vectorExpression " + vectorExpression.getClass().getSimpleName());
+    */
 
     batchSource.resetBatchIteration();
     int rowIndex = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorSubStr.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorSubStr.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorSubStr.java
index 284a47a..b1344ab 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorSubStr.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorSubStr.java
@@ -326,11 +326,13 @@ public class TestVectorSubStr {
     resultVectorExtractRow.init(new TypeInfo[] { targetTypeInfo }, new int[] { columns.size() });
     Object[] scrqtchRow = new Object[1];
 
+    /*
     System.out.println(
         "*DEBUG* typeInfo " + typeInfo.toString() +
         " targetTypeInfo " + targetTypeInfo.toString() +
         " subStrTestMode " + subStrTestMode +
         " vectorExpression " + vectorExpression.getClass().getSimpleName());
+    */
 
     batchSource.resetBatchIteration();
     int rowIndex = 0;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTimestampExtract.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTimestampExtract.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTimestampExtract.java
index 58e3fa3..e56a6c3 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTimestampExtract.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTimestampExtract.java
@@ -283,10 +283,12 @@ public class TestVectorTimestampExtract {
       Object[][] randomRows, ObjectInspector rowInspector, Object[] resultObjects)
           throws Exception {
 
+    /*
     System.out.println(
         "*DEBUG* dateTimeStringTypeInfo " + dateTimeStringTypeInfo.toString() +
         " timestampExtractTestMode ROW_MODE" +
         " exprDesc " + exprDesc.toString());
+    */
 
     HiveConf hiveConf = new HiveConf();
     ExprNodeEvaluator evaluator =
@@ -392,10 +394,12 @@ public class TestVectorTimestampExtract {
     VectorExpression vectorExpression = vectorizationContext.getVectorExpression(exprDesc);
     vectorExpression.transientInit();
 
+    /*
     System.out.println(
         "*DEBUG* dateTimeStringTypeInfo " + dateTimeStringTypeInfo.toString() +
         " timestampExtractTestMode " + timestampExtractTestMode +
         " vectorExpression " + vectorExpression.getClass().getSimpleName());
+    */
 
     VectorRandomRowSource rowSource = batchSource.getRowSource();
     VectorizedRowBatchCtx batchContext =

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
index d9fc060..2a2bbe1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestVectorizer.java
@@ -112,7 +112,10 @@ public class TestVectorizer {
     vectorDesc.setVecAggrDescs(
         new VectorAggregationDesc[] {
           new VectorAggregationDesc(
-              aggDesc, new GenericUDAFSum.GenericUDAFSumLong(), TypeInfoFactory.longTypeInfo, ColumnVector.Type.LONG, null,
+              aggDesc.getGenericUDAFName(),
+              new GenericUDAFSum.GenericUDAFSumLong(),
+              aggDesc.getMode(),
+              TypeInfoFactory.longTypeInfo, ColumnVector.Type.LONG, null,
               TypeInfoFactory.longTypeInfo, ColumnVector.Type.LONG, VectorUDAFCountStar.class)});
 
     desc.setOutputColumnNames(outputColumnNames);


[15/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query16.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query16.q.out b/ql/src/test/results/clientpositive/perf/tez/query16.q.out
index 0b64c55..5652f3b 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query16.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query16.q.out
@@ -80,22 +80,22 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 9 vectorized
-      File Output Operator [FS_174]
-        Limit [LIM_173] (rows=1 width=344)
+      File Output Operator [FS_176]
+        Limit [LIM_175] (rows=1 width=344)
           Number of rows:100
-          Select Operator [SEL_172] (rows=1 width=344)
+          Select Operator [SEL_174] (rows=1 width=344)
             Output:["_col0","_col1","_col2"]
           <-Reducer 8 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_171]
-              Select Operator [SEL_170] (rows=1 width=344)
+            SHUFFLE [RS_173]
+              Select Operator [SEL_172] (rows=1 width=344)
                 Output:["_col1","_col2","_col3"]
-                Group By Operator [GBY_169] (rows=1 width=344)
+                Group By Operator [GBY_171] (rows=1 width=344)
                   Output:["_col0","_col1","_col2"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"]
                 <-Reducer 7 [CUSTOM_SIMPLE_EDGE] vectorized
-                  PARTITION_ONLY_SHUFFLE [RS_168]
-                    Group By Operator [GBY_167] (rows=1 width=344)
+                  PARTITION_ONLY_SHUFFLE [RS_170]
+                    Group By Operator [GBY_169] (rows=1 width=344)
                       Output:["_col0","_col1","_col2"],aggregations:["count(_col0)","sum(_col1)","sum(_col2)"]
-                      Group By Operator [GBY_166] (rows=231905279 width=135)
+                      Group By Operator [GBY_168] (rows=231905279 width=135)
                         Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                       <-Reducer 6 [SIMPLE_EDGE]
                         SHUFFLE [RS_74]
@@ -106,21 +106,21 @@ Stage-0
                               Output:["_col4","_col5","_col6"]
                               Filter Operator [FIL_41] (rows=231905279 width=135)
                                 predicate:_col14 is null
-                                Merge Join Operator [MERGEJOIN_128] (rows=463810558 width=135)
-                                  Conds:RS_38._col4=RS_165._col0(Left Outer),Output:["_col4","_col5","_col6","_col14"]
+                                Merge Join Operator [MERGEJOIN_130] (rows=463810558 width=135)
+                                  Conds:RS_38._col4=RS_167._col0(Left Outer),Output:["_col4","_col5","_col6","_col14"]
                                 <-Reducer 18 [ONE_TO_ONE_EDGE] vectorized
-                                  FORWARD [RS_165]
+                                  FORWARD [RS_167]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_164] (rows=14399440 width=106)
+                                    Select Operator [SEL_166] (rows=14399440 width=106)
                                       Output:["_col0","_col1"]
-                                      Group By Operator [GBY_163] (rows=14399440 width=106)
+                                      Group By Operator [GBY_165] (rows=14399440 width=106)
                                         Output:["_col0"],keys:KEY._col0
                                       <-Map 17 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_162]
+                                        SHUFFLE [RS_164]
                                           PartitionCols:_col0
-                                          Group By Operator [GBY_161] (rows=28798881 width=106)
+                                          Group By Operator [GBY_163] (rows=28798881 width=106)
                                             Output:["_col0"],keys:cr_order_number
-                                            Filter Operator [FIL_160] (rows=28798881 width=106)
+                                            Filter Operator [FIL_162] (rows=28798881 width=106)
                                               predicate:cr_order_number is not null
                                               TableScan [TS_25] (rows=28798881 width=106)
                                                 default@catalog_returns,cr1,Tbl:COMPLETE,Col:NONE,Output:["cr_order_number"]
@@ -129,101 +129,101 @@ Stage-0
                                     PartitionCols:_col4
                                     Select Operator [SEL_37] (rows=421645953 width=135)
                                       Output:["_col4","_col5","_col6"]
-                                      Merge Join Operator [MERGEJOIN_127] (rows=421645953 width=135)
-                                        Conds:RS_34._col4=RS_159._col0(Left Semi),Output:["_col3","_col4","_col5","_col6","_col14"],residual filter predicates:{(_col3 <> _col14)}
+                                      Merge Join Operator [MERGEJOIN_129] (rows=421645953 width=135)
+                                        Conds:RS_34._col4=RS_161._col0(Left Semi),Output:["_col3","_col4","_col5","_col6","_col14"],residual filter predicates:{(_col3 <> _col14)}
                                       <-Map 16 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_159]
+                                        SHUFFLE [RS_161]
                                           PartitionCols:_col0
-                                          Group By Operator [GBY_158] (rows=287989836 width=135)
+                                          Group By Operator [GBY_160] (rows=287989836 width=135)
                                             Output:["_col0","_col1"],keys:_col0, _col1
-                                            Select Operator [SEL_157] (rows=287989836 width=135)
+                                            Select Operator [SEL_159] (rows=287989836 width=135)
                                               Output:["_col0","_col1"]
-                                              Filter Operator [FIL_156] (rows=287989836 width=135)
+                                              Filter Operator [FIL_158] (rows=287989836 width=135)
                                                 predicate:(cs_order_number is not null and cs_warehouse_sk is not null)
                                                 TableScan [TS_22] (rows=287989836 width=135)
                                                   default@catalog_sales,cs2,Tbl:COMPLETE,Col:NONE,Output:["cs_warehouse_sk","cs_order_number"]
                                       <-Reducer 4 [SIMPLE_EDGE]
                                         SHUFFLE [RS_34]
                                           PartitionCols:_col4
-                                          Merge Join Operator [MERGEJOIN_126] (rows=383314495 width=135)
-                                            Conds:RS_18._col2=RS_147._col0(Inner),Output:["_col3","_col4","_col5","_col6"]
+                                          Merge Join Operator [MERGEJOIN_128] (rows=383314495 width=135)
+                                            Conds:RS_18._col2=RS_149._col0(Inner),Output:["_col3","_col4","_col5","_col6"]
                                           <-Map 14 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_147]
+                                            SHUFFLE [RS_149]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_146] (rows=30 width=2045)
+                                              Select Operator [SEL_148] (rows=30 width=2045)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_145] (rows=30 width=2045)
+                                                Filter Operator [FIL_147] (rows=30 width=2045)
                                                   predicate:((cc_county) IN ('Ziebach County', 'Levy County', 'Huron County', 'Franklin Parish', 'Daviess County') and cc_call_center_sk is not null)
                                                   TableScan [TS_9] (rows=60 width=2045)
                                                     default@call_center,call_center,Tbl:COMPLETE,Col:NONE,Output:["cc_call_center_sk","cc_county"]
                                           <-Reducer 3 [SIMPLE_EDGE]
                                             SHUFFLE [RS_18]
                                               PartitionCols:_col2
-                                              Merge Join Operator [MERGEJOIN_125] (rows=348467716 width=135)
-                                                Conds:RS_15._col1=RS_139._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6"]
+                                              Merge Join Operator [MERGEJOIN_127] (rows=348467716 width=135)
+                                                Conds:RS_15._col1=RS_141._col0(Inner),Output:["_col2","_col3","_col4","_col5","_col6"]
                                               <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_139]
+                                                SHUFFLE [RS_141]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_138] (rows=20000000 width=1014)
+                                                  Select Operator [SEL_140] (rows=20000000 width=1014)
                                                     Output:["_col0"]
-                                                    Filter Operator [FIL_137] (rows=20000000 width=1014)
+                                                    Filter Operator [FIL_139] (rows=20000000 width=1014)
                                                       predicate:((ca_state = 'NY') and ca_address_sk is not null)
                                                       TableScan [TS_6] (rows=40000000 width=1014)
                                                         default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
                                               <-Reducer 2 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_15]
                                                   PartitionCols:_col1
-                                                  Merge Join Operator [MERGEJOIN_124] (rows=316788826 width=135)
-                                                    Conds:RS_155._col0=RS_131._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
+                                                  Merge Join Operator [MERGEJOIN_126] (rows=316788826 width=135)
+                                                    Conds:RS_157._col0=RS_133._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6"]
                                                   <-Map 10 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_131]
+                                                    SHUFFLE [RS_133]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_130] (rows=8116 width=1119)
+                                                      Select Operator [SEL_132] (rows=8116 width=1119)
                                                         Output:["_col0"]
-                                                        Filter Operator [FIL_129] (rows=8116 width=1119)
+                                                        Filter Operator [FIL_131] (rows=8116 width=1119)
                                                           predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'2001-04-01 00:00:00' AND TIMESTAMP'2001-05-31 00:00:00' and d_date_sk is not null)
                                                           TableScan [TS_3] (rows=73049 width=1119)
                                                             default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                                   <-Map 1 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_155]
+                                                    SHUFFLE [RS_157]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_154] (rows=287989836 width=135)
+                                                      Select Operator [SEL_156] (rows=287989836 width=135)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_153] (rows=287989836 width=135)
+                                                        Filter Operator [FIL_155] (rows=287989836 width=135)
                                                           predicate:((cs_call_center_sk BETWEEN DynamicValue(RS_19_call_center_cc_call_center_sk_min) AND DynamicValue(RS_19_call_center_cc_call_center_sk_max) and in_bloom_filter(cs_call_center_sk, DynamicValue(RS_19_call_center_cc_call_center_sk_bloom_filter))) and (cs_ship_addr_sk BETWEEN DynamicValue(RS_16_customer_address_ca_address_sk_min) AND DynamicValue(RS_16_customer_address_ca_address_sk_max) and in_bloom_filter(cs_ship_addr_sk, DynamicValue(RS_16_customer_address_ca_address_sk_bloom_filter))) and (cs_ship_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(cs_ship_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and cs_call_center_sk is not null and cs_order_number is not null and cs_ship_addr_sk is not null and cs_ship_date_sk is not null)
                                                           TableScan [TS_0] (rows=287989836 width=135)
                                                             default@catalog_sales,cs1,Tbl:COMPLETE,Col:NONE,Output:["cs_ship_date_sk","cs_ship_addr_sk","cs_call_center_sk","cs_warehouse_sk","cs_order_number","cs_ext_ship_cost","cs_net_profit"]
                                                           <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_136]
-                                                              Group By Operator [GBY_135] (rows=1 width=12)
+                                                            BROADCAST [RS_138]
+                                                              Group By Operator [GBY_137] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_134]
-                                                                  Group By Operator [GBY_133] (rows=1 width=12)
+                                                                SHUFFLE [RS_136]
+                                                                  Group By Operator [GBY_135] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_132] (rows=8116 width=1119)
+                                                                    Select Operator [SEL_134] (rows=8116 width=1119)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_130]
+                                                                       Please refer to the previous Select Operator [SEL_132]
                                                           <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_144]
-                                                              Group By Operator [GBY_143] (rows=1 width=12)
+                                                            BROADCAST [RS_146]
+                                                              Group By Operator [GBY_145] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=20000000)"]
                                                               <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_142]
-                                                                  Group By Operator [GBY_141] (rows=1 width=12)
+                                                                SHUFFLE [RS_144]
+                                                                  Group By Operator [GBY_143] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=20000000)"]
-                                                                    Select Operator [SEL_140] (rows=20000000 width=1014)
+                                                                    Select Operator [SEL_142] (rows=20000000 width=1014)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_138]
+                                                                       Please refer to the previous Select Operator [SEL_140]
                                                           <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_152]
-                                                              Group By Operator [GBY_151] (rows=1 width=12)
+                                                            BROADCAST [RS_154]
+                                                              Group By Operator [GBY_153] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_150]
-                                                                  Group By Operator [GBY_149] (rows=1 width=12)
+                                                                SHUFFLE [RS_152]
+                                                                  Group By Operator [GBY_151] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_148] (rows=30 width=2045)
+                                                                    Select Operator [SEL_150] (rows=30 width=2045)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_146]
+                                                                       Please refer to the previous Select Operator [SEL_148]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query17.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query17.q.out b/ql/src/test/results/clientpositive/perf/tez/query17.q.out
index 2e5e254..620d88a 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query17.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query17.q.out
@@ -89,8 +89,8 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 1 <- Reducer 17 (BROADCAST_EDGE), Reducer 21 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
-Map 18 <- Reducer 12 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE), Reducer 15 (BROADCAST_EDGE)
+Map 1 <- Reducer 14 (BROADCAST_EDGE), Reducer 15 (BROADCAST_EDGE), Reducer 17 (BROADCAST_EDGE), Reducer 21 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
+Map 18 <- Reducer 12 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE), Reducer 15 (BROADCAST_EDGE), Reducer 17 (BROADCAST_EDGE)
 Reducer 10 <- Map 18 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
 Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 13 (SIMPLE_EDGE)
 Reducer 12 <- Map 8 (CUSTOM_SIMPLE_EDGE)
@@ -112,16 +112,16 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_256]
-        Limit [LIM_255] (rows=100 width=88)
+      File Output Operator [FS_269]
+        Limit [LIM_268] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_254] (rows=421657640 width=88)
+          Select Operator [SEL_267] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_253]
-              Select Operator [SEL_252] (rows=421657640 width=88)
+            SHUFFLE [RS_266]
+              Select Operator [SEL_265] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13"]
-                Group By Operator [GBY_251] (rows=421657640 width=88)
+                Group By Operator [GBY_264] (rows=421657640 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","count(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)","sum(VALUE._col7)","count(VALUE._col8)","sum(VALUE._col9)","sum(VALUE._col10)","sum(VALUE._col11)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_50]
@@ -130,172 +130,181 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14"],aggregations:["count(_col3)","sum(_col3)","sum(_col7)","sum(_col6)","count(_col4)","sum(_col4)","sum(_col9)","sum(_col8)","count(_col5)","sum(_col5)","sum(_col11)","sum(_col10)"],keys:_col0, _col1, _col2
                       Select Operator [SEL_47] (rows=843315281 width=88)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
-                        Merge Join Operator [MERGEJOIN_202] (rows=843315281 width=88)
-                          Conds:RS_44._col3=RS_230._col0(Inner),Output:["_col5","_col9","_col10","_col14","_col21","_col25"]
+                        Merge Join Operator [MERGEJOIN_212] (rows=843315281 width=88)
+                          Conds:RS_44._col3=RS_250._col0(Inner),Output:["_col5","_col9","_col10","_col14","_col21","_col25"]
                         <-Map 20 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_230]
+                          SHUFFLE [RS_250]
                             PartitionCols:_col0
-                            Select Operator [SEL_229] (rows=1704 width=1910)
+                            Select Operator [SEL_249] (rows=1704 width=1910)
                               Output:["_col0","_col1"]
-                              Filter Operator [FIL_228] (rows=1704 width=1910)
+                              Filter Operator [FIL_248] (rows=1704 width=1910)
                                 predicate:s_store_sk is not null
                                 TableScan [TS_32] (rows=1704 width=1910)
                                   default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
                         <-Reducer 4 [SIMPLE_EDGE]
                           SHUFFLE [RS_44]
                             PartitionCols:_col3
-                            Merge Join Operator [MERGEJOIN_201] (rows=766650239 width=88)
+                            Merge Join Operator [MERGEJOIN_211] (rows=766650239 width=88)
                               Conds:RS_41._col1, _col2, _col4=RS_42._col7, _col8, _col9(Inner),Output:["_col3","_col5","_col9","_col10","_col14","_col21"]
                             <-Reducer 11 [SIMPLE_EDGE]
                               SHUFFLE [RS_42]
                                 PartitionCols:_col7, _col8, _col9
-                                Merge Join Operator [MERGEJOIN_200] (rows=348467716 width=135)
+                                Merge Join Operator [MERGEJOIN_210] (rows=348467716 width=135)
                                   Conds:RS_28._col2, _col1=RS_29._col1, _col2(Inner),Output:["_col3","_col7","_col8","_col9","_col10"]
                                 <-Reducer 13 [SIMPLE_EDGE]
                                   PARTITION_ONLY_SHUFFLE [RS_29]
                                     PartitionCols:_col1, _col2
-                                    Merge Join Operator [MERGEJOIN_199] (rows=63350266 width=77)
-                                      Conds:RS_243._col0=RS_213._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                                    Merge Join Operator [MERGEJOIN_209] (rows=63350266 width=77)
+                                      Conds:RS_241._col0=RS_223._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                                     <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_213]
+                                      PARTITION_ONLY_SHUFFLE [RS_223]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_208] (rows=36525 width=1119)
+                                        Select Operator [SEL_218] (rows=36525 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_205] (rows=36525 width=1119)
+                                          Filter Operator [FIL_215] (rows=36525 width=1119)
                                             predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
                                             TableScan [TS_3] (rows=73049 width=1119)
                                               default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_quarter_name"]
                                     <-Map 19 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_243]
+                                      SHUFFLE [RS_241]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_242] (rows=57591150 width=77)
+                                        Select Operator [SEL_240] (rows=57591150 width=77)
                                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                                          Filter Operator [FIL_241] (rows=57591150 width=77)
+                                          Filter Operator [FIL_239] (rows=57591150 width=77)
                                             predicate:(sr_customer_sk is not null and sr_item_sk is not null and sr_returned_date_sk is not null and sr_ticket_number is not null)
                                             TableScan [TS_15] (rows=57591150 width=77)
                                               default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_item_sk","sr_customer_sk","sr_ticket_number","sr_return_quantity"]
                                 <-Reducer 10 [SIMPLE_EDGE]
                                   SHUFFLE [RS_28]
                                     PartitionCols:_col2, _col1
-                                    Merge Join Operator [MERGEJOIN_198] (rows=316788826 width=135)
-                                      Conds:RS_250._col0=RS_211._col0(Inner),Output:["_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_208] (rows=316788826 width=135)
+                                      Conds:RS_263._col0=RS_221._col0(Inner),Output:["_col1","_col2","_col3"]
                                     <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_211]
+                                      PARTITION_ONLY_SHUFFLE [RS_221]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_207] (rows=36525 width=1119)
+                                        Select Operator [SEL_217] (rows=36525 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_204] (rows=36525 width=1119)
+                                          Filter Operator [FIL_214] (rows=36525 width=1119)
                                             predicate:((d_quarter_name) IN ('2000Q1', '2000Q2', '2000Q3') and d_date_sk is not null)
                                              Please refer to the previous TableScan [TS_3]
                                     <-Map 18 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_250]
+                                      SHUFFLE [RS_263]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_249] (rows=287989836 width=135)
+                                        Select Operator [SEL_262] (rows=287989836 width=135)
                                           Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_248] (rows=287989836 width=135)
-                                            predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_26_d3_d_date_sk_min) AND DynamicValue(RS_26_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_26_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
+                                          Filter Operator [FIL_261] (rows=287989836 width=135)
+                                            predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_26_d3_d_date_sk_min) AND DynamicValue(RS_26_d3_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_26_d3_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                             TableScan [TS_9] (rows=287989836 width=135)
                                               default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity"]
-                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_240]
-                                                Group By Operator [GBY_239] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_217]
-                                                    Group By Operator [GBY_215] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_212] (rows=36525 width=1119)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_207]
                                             <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_245]
-                                                Group By Operator [GBY_244] (rows=1 width=12)
+                                              BROADCAST [RS_244]
+                                                Group By Operator [GBY_242] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                 <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_141]
-                                                    Group By Operator [GBY_140] (rows=1 width=12)
+                                                  PARTITION_ONLY_SHUFFLE [RS_110]
+                                                    Group By Operator [GBY_109] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_139] (rows=63350266 width=77)
+                                                      Select Operator [SEL_108] (rows=63350266 width=77)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_199]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
                                             <-Reducer 15 [BROADCAST_EDGE] vectorized
                                               BROADCAST [RS_247]
-                                                Group By Operator [GBY_246] (rows=1 width=12)
+                                                Group By Operator [GBY_245] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=63350264)"]
                                                 <-Reducer 13 [CUSTOM_SIMPLE_EDGE]
-                                                  PARTITION_ONLY_SHUFFLE [RS_146]
-                                                    Group By Operator [GBY_145] (rows=1 width=12)
+                                                  PARTITION_ONLY_SHUFFLE [RS_125]
+                                                    Group By Operator [GBY_124] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=63350264)"]
-                                                      Select Operator [SEL_144] (rows=63350266 width=77)
+                                                      Select Operator [SEL_123] (rows=63350266 width=77)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_209]
+                                            <-Reducer 17 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_238]
+                                                Group By Operator [GBY_236] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  SHUFFLE [RS_235]
+                                                    Group By Operator [GBY_234] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_233] (rows=462000 width=1436)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Merge Join Operator [MERGEJOIN_199]
+                                                        Select Operator [SEL_231] (rows=462000 width=1436)
+                                                          Output:["_col0","_col1","_col2"]
+                                                          Filter Operator [FIL_230] (rows=462000 width=1436)
+                                                            predicate:i_item_sk is not null
+                                                            TableScan [TS_6] (rows=462000 width=1436)
+                                                              default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
+                                            <-Reducer 12 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_260]
+                                                Group By Operator [GBY_259] (rows=1 width=12)
+                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                  PARTITION_ONLY_SHUFFLE [RS_227]
+                                                    Group By Operator [GBY_225] (rows=1 width=12)
+                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                      Select Operator [SEL_222] (rows=36525 width=1119)
+                                                        Output:["_col0"]
+                                                         Please refer to the previous Select Operator [SEL_217]
                             <-Reducer 3 [SIMPLE_EDGE]
                               SHUFFLE [RS_41]
                                 PartitionCols:_col1, _col2, _col4
-                                Merge Join Operator [MERGEJOIN_197] (rows=696954748 width=88)
-                                  Conds:RS_38._col1=RS_222._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col9","_col10"]
+                                Merge Join Operator [MERGEJOIN_207] (rows=696954748 width=88)
+                                  Conds:RS_38._col1=RS_232._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col9","_col10"]
                                 <-Map 16 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_222]
+                                  SHUFFLE [RS_232]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_221] (rows=462000 width=1436)
-                                      Output:["_col0","_col1","_col2"]
-                                      Filter Operator [FIL_220] (rows=462000 width=1436)
-                                        predicate:i_item_sk is not null
-                                        TableScan [TS_6] (rows=462000 width=1436)
-                                          default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_item_desc"]
+                                     Please refer to the previous Select Operator [SEL_231]
                                 <-Reducer 2 [SIMPLE_EDGE]
                                   SHUFFLE [RS_38]
                                     PartitionCols:_col1
-                                    Merge Join Operator [MERGEJOIN_196] (rows=633595212 width=88)
-                                      Conds:RS_238._col0=RS_209._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
+                                    Merge Join Operator [MERGEJOIN_206] (rows=633595212 width=88)
+                                      Conds:RS_258._col0=RS_219._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5"]
                                     <-Map 8 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_209]
+                                      PARTITION_ONLY_SHUFFLE [RS_219]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_206] (rows=36524 width=1119)
+                                        Select Operator [SEL_216] (rows=36524 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_203] (rows=36524 width=1119)
+                                          Filter Operator [FIL_213] (rows=36524 width=1119)
                                             predicate:((d_quarter_name = '2000Q1') and d_date_sk is not null)
                                              Please refer to the previous TableScan [TS_3]
                                     <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_238]
+                                      SHUFFLE [RS_258]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_237] (rows=575995635 width=88)
+                                        Select Operator [SEL_257] (rows=575995635 width=88)
                                           Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                          Filter Operator [FIL_236] (rows=575995635 width=88)
-                                            predicate:((ss_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                          Filter Operator [FIL_256] (rows=575995635 width=88)
+                                            predicate:((ss_customer_sk BETWEEN DynamicValue(RS_29_store_returns_sr_customer_sk_min) AND DynamicValue(RS_29_store_returns_sr_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_29_store_returns_sr_customer_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_29_store_returns_sr_item_sk_min) AND DynamicValue(RS_29_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_29_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_36_d1_d_date_sk_min) AND DynamicValue(RS_36_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_36_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_45_store_s_store_sk_min) AND DynamicValue(RS_45_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_45_store_s_store_sk_bloom_filter))) and ss_customer_sk is not null and ss_item_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                             TableScan [TS_0] (rows=575995635 width=88)
                                               default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_store_sk","ss_ticket_number","ss_quantity"]
+                                            <-Reducer 14 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_243]
+                                                 Please refer to the previous Group By Operator [GBY_242]
+                                            <-Reducer 15 [BROADCAST_EDGE] vectorized
+                                              BROADCAST [RS_246]
+                                                 Please refer to the previous Group By Operator [GBY_245]
                                             <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_227]
-                                                Group By Operator [GBY_226] (rows=1 width=12)
-                                                  Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_225]
-                                                    Group By Operator [GBY_224] (rows=1 width=12)
-                                                      Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_223] (rows=462000 width=1436)
-                                                        Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_221]
+                                              BROADCAST [RS_237]
+                                                 Please refer to the previous Group By Operator [GBY_236]
                                             <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_235]
-                                                Group By Operator [GBY_234] (rows=1 width=12)
+                                              BROADCAST [RS_255]
+                                                Group By Operator [GBY_254] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                 <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_233]
-                                                    Group By Operator [GBY_232] (rows=1 width=12)
+                                                  SHUFFLE [RS_253]
+                                                    Group By Operator [GBY_252] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_231] (rows=1704 width=1910)
+                                                      Select Operator [SEL_251] (rows=1704 width=1910)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_229]
+                                                         Please refer to the previous Select Operator [SEL_249]
                                             <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_219]
-                                                Group By Operator [GBY_218] (rows=1 width=12)
+                                              BROADCAST [RS_229]
+                                                Group By Operator [GBY_228] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                 <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_216]
-                                                    Group By Operator [GBY_214] (rows=1 width=12)
+                                                  PARTITION_ONLY_SHUFFLE [RS_226]
+                                                    Group By Operator [GBY_224] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_210] (rows=36524 width=1119)
+                                                      Select Operator [SEL_220] (rows=36524 width=1119)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_206]
+                                                         Please refer to the previous Select Operator [SEL_216]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query18.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query18.q.out b/ql/src/test/results/clientpositive/perf/tez/query18.q.out
index e858527..1b9b2fb 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query18.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query18.q.out
@@ -86,16 +86,16 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_187]
-        Limit [LIM_186] (rows=100 width=135)
+      File Output Operator [FS_189]
+        Limit [LIM_188] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_185] (rows=1054114882 width=135)
+          Select Operator [SEL_187] (rows=1054114882 width=135)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
           <-Reducer 5 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_184]
-              Select Operator [SEL_183] (rows=1054114882 width=135)
+            SHUFFLE [RS_186]
+              Select Operator [SEL_185] (rows=1054114882 width=135)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
-                Group By Operator [GBY_182] (rows=1054114882 width=135)
+                Group By Operator [GBY_184] (rows=1054114882 width=135)
                   Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","count(VALUE._col3)","sum(VALUE._col4)","count(VALUE._col5)","sum(VALUE._col6)","count(VALUE._col7)","sum(VALUE._col8)","count(VALUE._col9)","sum(VALUE._col10)","count(VALUE._col11)","sum(VALUE._col12)","count(VALUE._col13)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
                 <-Reducer 4 [SIMPLE_EDGE]
                   SHUFFLE [RS_43]
@@ -104,42 +104,42 @@ Stage-0
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"],aggregations:["sum(_col4)","count(_col4)","sum(_col5)","count(_col5)","sum(_col6)","count(_col6)","sum(_col7)","count(_col7)","sum(_col8)","count(_col8)","sum(_col9)","count(_col9)","sum(_col10)","count(_col10)"],keys:_col0, _col1, _col2, _col3, 0L
                       Select Operator [SEL_40] (rows=421645953 width=135)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10"]
-                        Merge Join Operator [MERGEJOIN_143] (rows=421645953 width=135)
+                        Merge Join Operator [MERGEJOIN_145] (rows=421645953 width=135)
                           Conds:RS_37._col0=RS_38._col3(Inner),Output:["_col4","_col6","_col7","_col8","_col11","_col16","_col17","_col18","_col19","_col20","_col26"]
                         <-Reducer 3 [SIMPLE_EDGE]
                           PARTITION_ONLY_SHUFFLE [RS_37]
                             PartitionCols:_col0
-                            Merge Join Operator [MERGEJOIN_139] (rows=48400001 width=860)
-                              Conds:RS_34._col1=RS_152._col0(Inner),Output:["_col0","_col4","_col6","_col7","_col8"]
+                            Merge Join Operator [MERGEJOIN_141] (rows=48400001 width=860)
+                              Conds:RS_34._col1=RS_154._col0(Inner),Output:["_col0","_col4","_col6","_col7","_col8"]
                             <-Map 9 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_152]
+                              SHUFFLE [RS_154]
                                 PartitionCols:_col0
-                                Select Operator [SEL_151] (rows=1861800 width=385)
+                                Select Operator [SEL_153] (rows=1861800 width=385)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_150] (rows=1861800 width=385)
+                                  Filter Operator [FIL_152] (rows=1861800 width=385)
                                     predicate:cd_demo_sk is not null
                                     TableScan [TS_6] (rows=1861800 width=385)
                                       default@customer_demographics,cd2,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk"]
                             <-Reducer 2 [SIMPLE_EDGE]
                               SHUFFLE [RS_34]
                                 PartitionCols:_col1
-                                Merge Join Operator [MERGEJOIN_138] (rows=44000000 width=860)
-                                  Conds:RS_146._col2=RS_149._col0(Inner),Output:["_col0","_col1","_col4","_col6","_col7","_col8"]
+                                Merge Join Operator [MERGEJOIN_140] (rows=44000000 width=860)
+                                  Conds:RS_148._col2=RS_151._col0(Inner),Output:["_col0","_col1","_col4","_col6","_col7","_col8"]
                                 <-Map 1 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_146]
+                                  SHUFFLE [RS_148]
                                     PartitionCols:_col2
-                                    Select Operator [SEL_145] (rows=40000000 width=860)
+                                    Select Operator [SEL_147] (rows=40000000 width=860)
                                       Output:["_col0","_col1","_col2","_col4"]
-                                      Filter Operator [FIL_144] (rows=40000000 width=860)
+                                      Filter Operator [FIL_146] (rows=40000000 width=860)
                                         predicate:((c_birth_month) IN (9, 5, 12, 4, 1, 10) and c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
                                         TableScan [TS_0] (rows=80000000 width=860)
                                           default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk","c_birth_month","c_birth_year"]
                                 <-Map 8 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_149]
+                                  SHUFFLE [RS_151]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_148] (rows=20000000 width=1014)
+                                    Select Operator [SEL_150] (rows=20000000 width=1014)
                                       Output:["_col0","_col1","_col2","_col3"]
-                                      Filter Operator [FIL_147] (rows=20000000 width=1014)
+                                      Filter Operator [FIL_149] (rows=20000000 width=1014)
                                         predicate:((ca_state) IN ('ND', 'WI', 'AL', 'NC', 'OK', 'MS', 'TN') and ca_address_sk is not null)
                                         TableScan [TS_3] (rows=40000000 width=1014)
                                           default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county","ca_state","ca_country"]
@@ -148,96 +148,96 @@ Stage-0
                             PartitionCols:_col3
                             Select Operator [SEL_30] (rows=383314495 width=135)
                               Output:["_col1","_col3","_col6","_col7","_col8","_col9","_col10","_col16"]
-                              Merge Join Operator [MERGEJOIN_142] (rows=383314495 width=135)
-                                Conds:RS_27._col3=RS_171._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col8","_col14","_col16"]
+                              Merge Join Operator [MERGEJOIN_144] (rows=383314495 width=135)
+                                Conds:RS_27._col3=RS_173._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col8","_col14","_col16"]
                               <-Map 18 [SIMPLE_EDGE] vectorized
-                                PARTITION_ONLY_SHUFFLE [RS_171]
+                                PARTITION_ONLY_SHUFFLE [RS_173]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_170] (rows=462000 width=1436)
+                                  Select Operator [SEL_172] (rows=462000 width=1436)
                                     Output:["_col0","_col1"]
-                                    Filter Operator [FIL_169] (rows=462000 width=1436)
+                                    Filter Operator [FIL_171] (rows=462000 width=1436)
                                       predicate:i_item_sk is not null
                                       TableScan [TS_18] (rows=462000 width=1436)
                                         default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id"]
                               <-Reducer 12 [SIMPLE_EDGE]
                                 SHUFFLE [RS_27]
                                   PartitionCols:_col3
-                                  Merge Join Operator [MERGEJOIN_141] (rows=348467716 width=135)
-                                    Conds:RS_24._col2=RS_163._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col14"]
+                                  Merge Join Operator [MERGEJOIN_143] (rows=348467716 width=135)
+                                    Conds:RS_24._col2=RS_165._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col8","_col14"]
                                   <-Map 16 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_163]
+                                    PARTITION_ONLY_SHUFFLE [RS_165]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_162] (rows=465450 width=385)
+                                      Select Operator [SEL_164] (rows=465450 width=385)
                                         Output:["_col0","_col3"]
-                                        Filter Operator [FIL_161] (rows=465450 width=385)
+                                        Filter Operator [FIL_163] (rows=465450 width=385)
                                           predicate:((cd_education_status = 'College') and (cd_gender = 'M') and cd_demo_sk is not null)
                                           TableScan [TS_15] (rows=1861800 width=385)
                                             default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_education_status","cd_dep_count"]
                                   <-Reducer 11 [SIMPLE_EDGE]
                                     SHUFFLE [RS_24]
                                       PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_140] (rows=316788826 width=135)
-                                        Conds:RS_181._col0=RS_155._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+                                      Merge Join Operator [MERGEJOIN_142] (rows=316788826 width=135)
+                                        Conds:RS_183._col0=RS_157._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
                                       <-Map 14 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_155]
+                                        PARTITION_ONLY_SHUFFLE [RS_157]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_154] (rows=36524 width=1119)
+                                          Select Operator [SEL_156] (rows=36524 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_153] (rows=36524 width=1119)
+                                            Filter Operator [FIL_155] (rows=36524 width=1119)
                                               predicate:((d_year = 2001) and d_date_sk is not null)
                                               TableScan [TS_12] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
                                       <-Map 10 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_181]
+                                        SHUFFLE [RS_183]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_180] (rows=287989836 width=135)
+                                          Select Operator [SEL_182] (rows=287989836 width=135)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-                                            Filter Operator [FIL_179] (rows=287989836 width=135)
+                                            Filter Operator [FIL_181] (rows=287989836 width=135)
                                               predicate:((cs_bill_cdemo_sk BETWEEN DynamicValue(RS_25_cd1_cd_demo_sk_min) AND DynamicValue(RS_25_cd1_cd_demo_sk_max) and in_bloom_filter(cs_bill_cdemo_sk, DynamicValue(RS_25_cd1_cd_demo_sk_bloom_filter))) and (cs_bill_customer_sk BETWEEN DynamicValue(RS_37_customer_c_customer_sk_min) AND DynamicValue(RS_37_customer_c_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_37_customer_c_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_28_item_i_item_sk_min) AND DynamicValue(RS_28_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_28_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_22_date_dim_d_date_sk_min) AND DynamicValue(RS_22_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_22_date_dim_d_date_sk_bloom_filter))) and cs_bill_cdemo_sk is not null and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                               TableScan [TS_9] (rows=287989836 width=135)
                                                 default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_bill_cdemo_sk","cs_item_sk","cs_quantity","cs_list_price","cs_sales_price","cs_coupon_amt","cs_net_profit"]
                                               <-Reducer 15 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_160]
-                                                  Group By Operator [GBY_159] (rows=1 width=12)
+                                                BROADCAST [RS_162]
+                                                  Group By Operator [GBY_161] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 14 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_158]
-                                                      Group By Operator [GBY_157] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_160]
+                                                      Group By Operator [GBY_159] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_156] (rows=36524 width=1119)
+                                                        Select Operator [SEL_158] (rows=36524 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_154]
+                                                           Please refer to the previous Select Operator [SEL_156]
                                               <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_168]
-                                                  Group By Operator [GBY_167] (rows=1 width=12)
+                                                BROADCAST [RS_170]
+                                                  Group By Operator [GBY_169] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_166]
-                                                      Group By Operator [GBY_165] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_168]
+                                                      Group By Operator [GBY_167] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_164] (rows=465450 width=385)
+                                                        Select Operator [SEL_166] (rows=465450 width=385)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_162]
+                                                           Please refer to the previous Select Operator [SEL_164]
                                               <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_176]
-                                                  Group By Operator [GBY_175] (rows=1 width=12)
+                                                BROADCAST [RS_178]
+                                                  Group By Operator [GBY_177] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_174]
-                                                      Group By Operator [GBY_173] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_176]
+                                                      Group By Operator [GBY_175] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_172] (rows=462000 width=1436)
+                                                        Select Operator [SEL_174] (rows=462000 width=1436)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_170]
+                                                           Please refer to the previous Select Operator [SEL_172]
                                               <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_178]
-                                                  Group By Operator [GBY_177] (rows=1 width=12)
+                                                BROADCAST [RS_180]
+                                                  Group By Operator [GBY_179] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=48400000)"]
                                                   <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
-                                                    PARTITION_ONLY_SHUFFLE [RS_120]
-                                                      Group By Operator [GBY_119] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_126]
+                                                      Group By Operator [GBY_125] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=48400000)"]
-                                                        Select Operator [SEL_118] (rows=48400001 width=860)
+                                                        Select Operator [SEL_124] (rows=48400001 width=860)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_139]
+                                                           Please refer to the previous Merge Join Operator [MERGEJOIN_141]
 


[34/48] hive git commit: HIVE-19940: Push predicates with deterministic UDFs with RBO (Janaki Lahorani, reviewed by Vineet Garg, Naveen Gangam)

Posted by se...@apache.org.
HIVE-19940: Push predicates with deterministic UDFs with RBO (Janaki Lahorani, reviewed by Vineet Garg, Naveen Gangam)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/34adf31a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/34adf31a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/34adf31a

Branch: refs/heads/master-txnstats
Commit: 34adf31af4189c42c9944c7e41820dcdb11bd01a
Parents: 0966a38
Author: Naveen Gangam <ng...@apache.org>
Authored: Mon Jul 16 12:23:08 2018 -0400
Committer: Naveen Gangam <ng...@apache.org>
Committed: Mon Jul 16 12:23:08 2018 -0400

----------------------------------------------------------------------
 .../hive/ql/ppd/ExprWalkerProcFactory.java      |  30 +-
 .../clientpositive/ppd_deterministic_expr.q     | 143 +++++
 .../test/queries/clientpositive/ppd_udf_col.q   |  48 ++
 .../clientpositive/llap/check_constraint.q.out  |  17 +-
 .../llap/enforce_constraint_notnull.q.out       |  17 +-
 .../results/clientpositive/llap/lineage3.q.out  |   2 +-
 .../clientpositive/llap/subquery_in.q.out       |  22 +-
 .../clientpositive/llap/subquery_notin.q.out    |  68 ++-
 .../clientpositive/masking_disablecbo_2.q.out   | 219 ++++----
 .../clientpositive/perf/tez/query8.q.out        | 116 ++--
 .../clientpositive/ppd_deterministic_expr.q.out | 553 +++++++++++++++++++
 .../results/clientpositive/ppd_udf_col.q.out    | 409 ++++++++++++++
 .../results/clientpositive/union_offcbo.q.out   |  34 +-
 13 files changed, 1425 insertions(+), 253 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
index b01a9ba..1c662d7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java
@@ -104,7 +104,11 @@ public final class ExprWalkerProcFactory {
           return false;
         } else {
           if (exp instanceof ExprNodeGenericFuncDesc) {
-            isCandidate = false;
+            if (isDeterministic((ExprNodeGenericFuncDesc) exp)) {
+              isCandidate = true;
+            } else {
+              isCandidate = false;
+            }
           }
           if (exp instanceof ExprNodeColumnDesc && ci == null) {
             ExprNodeColumnDesc column = (ExprNodeColumnDesc)exp;
@@ -136,6 +140,30 @@ public final class ExprWalkerProcFactory {
   }
 
   /**
+   *
+   * @param funcDesc function descriptor
+   * @return true if the function is deterministic, false otherwise
+   */
+  public static boolean isDeterministic(ExprNodeGenericFuncDesc funcDesc) {
+    if (FunctionRegistry.isConsistentWithinQuery(funcDesc.getGenericUDF())) {
+      // check whether the children are deterministic
+      for (ExprNodeDesc exprNodeDesc : funcDesc.getChildren()) {
+        if (exprNodeDesc instanceof ExprNodeGenericFuncDesc) {
+          if (!isDeterministic((ExprNodeGenericFuncDesc) exprNodeDesc)) {
+            // some child is not deterministic - return false
+            return false;
+          }
+        }
+      }
+      // all children are deterministic - return true
+      return true;
+    }
+
+    // function is not deterministic - return false
+    return false;
+  }
+
+  /**
    * FieldExprProcessor.
    *
    */

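The isDeterministic helper added above walks a predicate's expression tree recursively: the predicate remains a pushdown candidate only if its own function and every nested function are consistent within the query. Below is a minimal, self-contained sketch of that walk; Expr, FuncExpr and the consistentWithinQuery flag are simplified stand-in types used for illustration only, not Hive's ExprNodeDesc/ExprNodeGenericFuncDesc/FunctionRegistry API.

// Hedged sketch of the recursive determinism check (stand-in types, not Hive classes).
import java.util.Arrays;
import java.util.List;

class Expr {
}

class FuncExpr extends Expr {
  // Stand-in for FunctionRegistry.isConsistentWithinQuery(genericUDF).
  final boolean consistentWithinQuery;
  final List<Expr> children;

  FuncExpr(boolean consistentWithinQuery, Expr... children) {
    this.consistentWithinQuery = consistentWithinQuery;
    this.children = Arrays.asList(children);
  }
}

public class DeterminismCheck {
  // A function expression is deterministic only if the function itself is
  // consistent within the query and every nested function expression is too.
  static boolean isDeterministic(FuncExpr f) {
    if (!f.consistentWithinQuery) {
      return false;                       // the function itself is non-deterministic
    }
    for (Expr child : f.children) {
      if (child instanceof FuncExpr && !isDeterministic((FuncExpr) child)) {
        return false;                     // a nested function is non-deterministic
      }
    }
    return true;                          // function and all nested functions are deterministic
  }

  public static void main(String[] args) {
    FuncExpr castOverCast = new FuncExpr(true, new FuncExpr(true));   // e.g. cast over a deterministic cast
    FuncExpr castOverRand = new FuncExpr(true, new FuncExpr(false));  // e.g. cast over rand()
    System.out.println(isDeterministic(castOverCast));  // true  -> predicate stays a pushdown candidate
    System.out.println(isDeterministic(castOverRand));  // false -> predicate is not pushed
  }
}

Under that rule, deterministic wrappers such as the cast(... as char(...)) partition columns in the views added below stay eligible for predicate pushdown, while anything built on rand() (as exercised in ppd_udf_col.q later in this commit) should still not be pushed past the projection.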
http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/queries/clientpositive/ppd_deterministic_expr.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ppd_deterministic_expr.q b/ql/src/test/queries/clientpositive/ppd_deterministic_expr.q
new file mode 100644
index 0000000..47c8849
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/ppd_deterministic_expr.q
@@ -0,0 +1,143 @@
+set hive.auto.convert.join=false;
+set hive.optimize.index.filter=true;
+set hive.cbo.enable=false;
+
+CREATE TABLE `testb`(
+   `cola` string COMMENT '',
+   `colb` string COMMENT '',
+   `colc` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+
+STORED AS AVRO;
+
+CREATE TABLE `testa`(
+   `col1` string COMMENT '',
+   `col2` string COMMENT '',
+   `col3` string COMMENT '',
+   `col4` string COMMENT '',
+   `col5` string COMMENT '')
+PARTITIONED BY (
+   `part1` string,
+   `part2` string,
+   `part3` string)
+STORED AS AVRO;
+
+insert into testA partition (part1='US', part2='ABC', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd');
+
+insert into testA partition (part1='UK', part2='DEF', part3='123')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd');
+
+insert into testA partition (part1='US', part2='DEF', part3='200')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd');
+
+insert into testA partition (part1='CA', part2='ABC', part3='300')
+values ('12.34', '100', '200', '300', 'abc'),
+('12.341', '1001', '2001', '3001', 'abcd');
+
+insert into testB partition (part1='CA', part2='ABC', part3='300')
+values ('600', '700', 'abc'), ('601', '701', 'abcd');
+
+insert into testB partition (part1='CA', part2='ABC', part3='400')
+values ( '600', '700', 'abc'), ( '601', '701', 'abcd');
+
+insert into testB partition (part1='UK', part2='PQR', part3='500')
+values ('600', '700', 'abc'), ('601', '701', 'abcd');
+
+insert into testB partition (part1='US', part2='DEF', part3='200')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd');
+
+insert into testB partition (part1='US', part2='PQR', part3='123')
+values ( '600', '700', 'abc'), ('601', '701', 'abcd');
+
+-- views with deterministic functions
+create view viewDeterministicUDFA partitioned on (vpart1, vpart2, vpart3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testa
+where part1 in ('US', 'CA');
+
+create view viewDeterministicUDFB partitioned on (vpart1, vpart2, vpart3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ cast(part1 as char(2)) as vpart1,
+ cast(part2 as char(3)) as vpart2,
+ cast(part3 as char(3)) as vpart3
+ from testb
+where part1 in ('US', 'CA');
+
+-- views without function reference
+create view viewNoUDFA partitioned on (part1, part2, part3) as select
+ cast(col1 as decimal(38,18)) as vcol1,
+ cast(col2 as decimal(38,18)) as vcol2,
+ cast(col3 as decimal(38,18)) as vcol3,
+ cast(col4 as decimal(38,18)) as vcol4,
+ cast(col5 as char(10)) as vcol5,
+ part1,
+ part2,
+ part3
+ from testa
+where part1 in ('US', 'CA');
+
+create view viewNoUDFB partitioned on (part1, part2, part3) as select
+ cast(cola as decimal(38,18)) as vcolA,
+ cast(colb as decimal(38,18)) as vcolB,
+ cast(colc as char(10)) as vcolC,
+ part1,
+ part2,
+ part3
+ from testb
+where part1 in ('US', 'CA');
+
+-- query referencing deterministic functions
+explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200';
+
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewDeterministicUDFA a inner join viewDeterministicUDFB b
+on a.vpart1 = b.vpart1
+and a.vpart2 = b.vpart2
+and a.vpart3 = b.vpart3
+and a.vpart1 = 'US'
+and a.vpart2 = 'DEF'
+and a.vpart3 = '200';
+
+-- query with views referencing no udfs
+explain
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200';
+
+select vcol1, vcol2, vcol3, vcola, vcolb
+from viewNoUDFA a inner join viewNoUDFB b
+on a.part1 = b.part1
+and a.part2 = b.part2
+and a.part3 = b.part3
+and a.part1 = 'US'
+and a.part2 = 'DEF'
+and a.part3 = '200';

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/queries/clientpositive/ppd_udf_col.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/ppd_udf_col.q b/ql/src/test/queries/clientpositive/ppd_udf_col.q
index 9c7d4fd..ac2f861 100644
--- a/ql/src/test/queries/clientpositive/ppd_udf_col.q
+++ b/ql/src/test/queries/clientpositive/ppd_udf_col.q
@@ -48,3 +48,51 @@ EXPLAIN
 SELECT key,randum123, v10
 FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
 WHERE a.v10 <= 200;
+
+set hive.cbo.enable=false;
+
+EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1;
+
+EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20;
+
+EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3;
+
+EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200;
+
+set hive.ppd.remove.duplicatefilters=false;
+
+EXPLAIN
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1;
+
+EXPLAIN
+SELECT * FROM
+(
+SELECT key, randum123
+FROM (SELECT *, cast(rand() as double) AS randum123 FROM src WHERE key = 100) a
+WHERE randum123 <=0.1)s WHERE s.randum123>0.1 LIMIT 20;
+
+EXPLAIN
+SELECT key,randum123, h4
+FROM (SELECT *, cast(rand() as double) AS randum123, hex(4) AS h4 FROM src WHERE key = 100) a
+WHERE a.h4 <= 3;
+
+EXPLAIN
+SELECT key,randum123, v10
+FROM (SELECT *, cast(rand() as double) AS randum123, value*10 AS v10 FROM src WHERE key = 100) a
+WHERE a.v10 <= 200;

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/llap/check_constraint.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/check_constraint.q.out b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
index 411b4a6..e4cd97e 100644
--- a/ql/src/test/results/clientpositive/llap/check_constraint.q.out
+++ b/ql/src/test/results/clientpositive/llap/check_constraint.q.out
@@ -1820,20 +1820,17 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: ((key < 10) and enforce_constraint((CAST( key AS decimal(5,2)) is not null and (CAST( key AS decimal(5,2)) >= CAST( UDFToInteger(key) AS decimal(5,2))) is not false))) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), CAST( key AS decimal(5,2)) (type: decimal(5,2)), value (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 166 Data size: 34362 Basic stats: COMPLETE Column stats: COMPLETE
-                      Filter Operator
-                        predicate: enforce_constraint((_col1 is not null and (_col1 >= CAST( _col0 AS decimal(5,2))) is not false)) (type: boolean)
+                      Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          sort order: 
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
+                        value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
                   Filter Operator
                     predicate: ((key < 20) and (key > 10) and enforce_constraint(value is not null)) (type: boolean)
                     Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
index 5a3f519..5e766c8 100644
--- a/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
+++ b/ql/src/test/results/clientpositive/llap/enforce_constraint_notnull.q.out
@@ -3424,20 +3424,17 @@ STAGE PLANS:
                   alias: src
                   Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
-                    predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 29548 Basic stats: COMPLETE Column stats: COMPLETE
+                    predicate: ((key < 10) and enforce_constraint((UDFToInteger(key) is not null and value is not null))) (type: boolean)
+                    Statistics: Num rows: 83 Data size: 14774 Basic stats: COMPLETE Column stats: COMPLETE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), CAST( key AS decimal(5,2)) (type: decimal(5,2)), value (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 166 Data size: 34362 Basic stats: COMPLETE Column stats: COMPLETE
-                      Filter Operator
-                        predicate: enforce_constraint((_col0 is not null and _col2 is not null)) (type: boolean)
+                      Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        sort order: 
+                        Map-reduce partition columns: _col0 (type: int)
                         Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          sort order: 
-                          Map-reduce partition columns: _col0 (type: int)
-                          Statistics: Num rows: 83 Data size: 17181 Basic stats: COMPLETE Column stats: COMPLETE
-                          value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
+                        value expressions: _col0 (type: int), _col1 (type: decimal(5,2)), _col2 (type: string)
                   Filter Operator
                     predicate: ((key < 20) and (key > 10) and enforce_constraint(value is not null)) (type: boolean)
                     Statistics: Num rows: 27 Data size: 4806 Basic stats: COMPLETE Column stats: COMPLETE

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/llap/lineage3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/lineage3.q.out b/ql/src/test/results/clientpositive/llap/lineage3.q.out
index 27dd874..e05d452 100644
--- a/ql/src/test/results/clientpositive/llap/lineage3.q.out
+++ b/ql/src/test/results/clientpositive/llap/lineage3.q.out
@@ -180,7 +180,7 @@ PREHOOK: Input: default@src1
 #### A masked pattern was here ####
 {"version":"1.0","engine":"tez","database":"default","hash":"94e9cc0a67801fe1503a3cb0c5029d59","queryText":"select * from src1 a\nwhere exists\n  (select cint from alltypesorc b\n   where a.key = b.ctinyint + 300)\nand key > 300","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(UDFToDouble(a.key) > 300.0D)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0,1],"expression":"(a.key = a.key)","edgeType":"PREDICATE"},{"sources":[4],"targets":[0,1],"expression":"b.ctinyint is not null","edgeType":"PREDICATE"},{"sources":[4,2],"targets":[0,1],"expression":"(UDFToDouble((UDFToInteger(b.ctinyint) + 300)) = UDFToDouble(a.key))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"a.key"},{"id":1,"vertexType":"COLUMN","vertexId":"a.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1
 .value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"}]}
 311	val_311
-Warning: Shuffle Join MERGEJOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[34][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select key, value from src1
 where key not in (select key+18 from src1) order by key
 PREHOOK: type: QUERY

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/llap/subquery_in.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_in.q.out b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
index 8f3912e..8007025 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_in.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_in.q.out
@@ -4575,23 +4575,19 @@ STAGE PLANS:
                   predicate: (_col1 is not null and _col2 is not null) (type: boolean)
                   Statistics: Num rows: 13 Data size: 260 Basic stats: COMPLETE Column stats: COMPLETE
                   Select Operator
-                    expressions: (_col1 / _col2) (type: double), _col0 (type: int)
+                    expressions: _col0 (type: int), (_col1 / _col2) (type: double)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 13 Data size: 156 Basic stats: COMPLETE Column stats: COMPLETE
-                    Select Operator
-                      expressions: _col1 (type: int), _col0 (type: double)
+                    Group By Operator
+                      keys: _col0 (type: int), _col1 (type: double)
+                      mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 13 Data size: 156 Basic stats: COMPLETE Column stats: COMPLETE
-                      Group By Operator
-                        keys: _col0 (type: int), _col1 (type: double)
-                        mode: hash
-                        outputColumnNames: _col0, _col1
+                      Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: int), _col1 (type: double)
+                        sort order: ++
+                        Map-reduce partition columns: _col0 (type: int), _col1 (type: double)
                         Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
-                        Reduce Output Operator
-                          key expressions: _col0 (type: int), _col1 (type: double)
-                          sort order: ++
-                          Map-reduce partition columns: _col0 (type: int), _col1 (type: double)
-                          Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
 
   Stage: Stage-0
     Fetch Operator

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
index 469ec69..11facd1 100644
--- a/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
+++ b/ql/src/test/results/clientpositive/llap/subquery_notin.q.out
@@ -1416,7 +1416,7 @@ POSTHOOK: Input: default@t1_v
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@T2_v
 POSTHOOK: Lineage: T2_v.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-Warning: Shuffle Join MERGEJOIN[33][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain
 select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
@@ -1478,7 +1478,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: CASE WHEN ((key > '104')) THEN (null) ELSE (key) END (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 83 Data size: 15272 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 83 Data size: 7221 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         keys: _col0 (type: string)
                         mode: hash
@@ -1570,7 +1570,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[33][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * 
 from T1_v where T1_v.key not in (select T2_v.key from T2_v)
 PREHOOK: type: QUERY
@@ -2427,7 +2427,7 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@part
 #### A masked pattern was here ####
 26
-Warning: Shuffle Join MERGEJOIN[35][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain select * from part  where floor(p_retailprice) NOT IN (select floor(min(p_retailprice)) from part group by p_type)
 PREHOOK: type: QUERY
 POSTHOOK: query: explain select * from part  where floor(p_retailprice) NOT IN (select floor(min(p_retailprice)) from part group by p_type)
@@ -2550,16 +2550,24 @@ STAGE PLANS:
                       sort order: 
                       Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
                       value expressions: _col0 (type: bigint), _col1 (type: bigint)
-                  Group By Operator
-                    keys: _col0 (type: bigint)
-                    mode: hash
+                Select Operator
+                  expressions: _col1 (type: double)
+                  outputColumnNames: _col1
+                  Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                  Select Operator
+                    expressions: floor(_col1) (type: bigint)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
-                    Reduce Output Operator
-                      key expressions: _col0 (type: bigint)
-                      sort order: +
-                      Map-reduce partition columns: _col0 (type: bigint)
+                    Statistics: Num rows: 13 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                    Group By Operator
+                      keys: _col0 (type: bigint)
+                      mode: hash
+                      outputColumnNames: _col0
                       Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: bigint)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: bigint)
+                        Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
         Reducer 6 
             Execution mode: vectorized, llap
             Reduce Operator Tree:
@@ -2597,7 +2605,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[35][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[36][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from part  where floor(p_retailprice) NOT IN (select floor(min(p_retailprice)) from part group by p_type)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@part
@@ -3106,7 +3114,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: (UDFToDouble(p_type) + 2.0D) (type: double), p_brand (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 26 Data size: 2600 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 26 Data size: 5096 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         keys: _col0 (type: double), _col1 (type: string)
                         mode: hash
@@ -3223,11 +3231,12 @@ STAGE PLANS:
 #### A masked pattern was here ####
       Edges:
         Reducer 10 <- Map 9 (SIMPLE_EDGE)
+        Reducer 11 <- Map 9 (SIMPLE_EDGE)
         Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (ONE_TO_ONE_EDGE)
         Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 8 (ONE_TO_ONE_EDGE)
         Reducer 5 <- Map 4 (SIMPLE_EDGE), Reducer 10 (ONE_TO_ONE_EDGE)
         Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
-        Reducer 7 <- Map 4 (SIMPLE_EDGE), Reducer 10 (ONE_TO_ONE_EDGE)
+        Reducer 7 <- Map 4 (SIMPLE_EDGE), Reducer 11 (ONE_TO_ONE_EDGE)
         Reducer 8 <- Reducer 7 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
@@ -3286,7 +3295,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: part
-                  filterExpr: p_size is not null (type: boolean)
+                  filterExpr: (p_size is not null or p_size is not null) (type: boolean)
                   Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
                   Filter Operator
                     predicate: p_size is not null (type: boolean)
@@ -3305,6 +3314,23 @@ STAGE PLANS:
                           sort order: +
                           Map-reduce partition columns: _col0 (type: int)
                           Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: p_size is not null (type: boolean)
+                    Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: (p_size + 1) (type: int)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 26 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE
+                      Group By Operator
+                        keys: _col0 (type: int)
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
+                        Reduce Output Operator
+                          key expressions: _col0 (type: int)
+                          sort order: +
+                          Map-reduce partition columns: _col0 (type: int)
+                          Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
             Execution mode: vectorized, llap
             LLAP IO: no inputs
         Reducer 10 
@@ -3320,6 +3346,14 @@ STAGE PLANS:
                   sort order: +
                   Map-reduce partition columns: _col0 (type: int)
                   Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
+        Reducer 11 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: int)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 13 Data size: 52 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: int)
                   sort order: +
@@ -3822,7 +3856,7 @@ STAGE PLANS:
                     Select Operator
                       expressions: concat('v', value) (type: string), key (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                       Group By Operator
                         keys: _col1 (type: string), _col0 (type: string)
                         mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
index fad0120..5a70e00 100644
--- a/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
+++ b/ql/src/test/results/clientpositive/masking_disablecbo_2.q.out
@@ -24,25 +24,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Execution mode: vectorized
 
   Stage: Stage-0
@@ -81,25 +78,22 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
+                File Output Operator
+                  compressed: false
                   Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  File Output Operator
-                    compressed: false
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    table:
-                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
       Execution mode: vectorized
 
   Stage: Stage-0
@@ -135,27 +129,21 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and reverse(value) is not null and (UDFToInteger(key) > 0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and reverse(value) is not null) (type: boolean)
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: _col1 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
                   Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: _col1 is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col1 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col1 (type: string)
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: int)
+                  value expressions: _col0 (type: int)
           TableScan
             alias: a
             filterExpr: key is not null (type: boolean)
@@ -210,47 +198,38 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and UDFToDouble(UDFToInteger(key)) is not null) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and UDFToDouble(_col0) is not null) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and UDFToDouble(UDFToInteger(key)) is not null) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: UDFToDouble(_col0) (type: double)
+                  sort order: +
+                  Map-reduce partition columns: UDFToDouble(_col0) (type: double)
                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Reduce Output Operator
-                    key expressions: UDFToDouble(_col0) (type: double)
-                    sort order: +
-                    Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                    value expressions: _col0 (type: int), _col1 (type: string)
+                  value expressions: _col0 (type: int), _col1 (type: string)
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and UDFToDouble(reverse(value)) is not null and (UDFToInteger(key) > 0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0) and UDFToDouble(reverse(value)) is not null) (type: boolean)
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
+                Reduce Output Operator
+                  key expressions: UDFToDouble(_col1) (type: double)
+                  sort order: +
+                  Map-reduce partition columns: UDFToDouble(_col1) (type: double)
                   Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Filter Operator
-                    predicate: UDFToDouble(_col1) is not null (type: boolean)
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: UDFToDouble(_col1) (type: double)
-                      sort order: +
-                      Map-reduce partition columns: UDFToDouble(_col1) (type: double)
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: int)
+                  value expressions: _col0 (type: int)
       Reduce Operator Tree:
         Join Operator
           condition map:
@@ -288,56 +267,50 @@ STAGE PLANS:
       Map Operator Tree:
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10)) (type: boolean)
+              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
-                  Union
+                Union
+                  Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: int), _col1 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: int), _col1 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                       Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                        Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
           TableScan
             alias: src
+            filterExpr: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(key) (type: int), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Filter Operator
-                predicate: (((_col0 % 2) = 0) and (_col0 < 10) and (_col0 > 0)) (type: boolean)
+            Filter Operator
+              predicate: (((UDFToInteger(key) % 2) = 0) and (UDFToInteger(key) < 10) and (UDFToInteger(key) > 0)) (type: boolean)
+              Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(key) (type: int), reverse(value) (type: string)
+                outputColumnNames: _col0, _col1
                 Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                Select Operator
-                  expressions: _col0 (type: int), reverse(_col1) (type: string)
-                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
-                  Union
+                Union
+                  Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
+                  Group By Operator
+                    keys: _col0 (type: int), _col1 (type: string)
+                    mode: hash
+                    outputColumnNames: _col0, _col1
                     Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
-                    Group By Operator
-                      keys: _col0 (type: int), _col1 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0, _col1
+                    Reduce Output Operator
+                      key expressions: _col0 (type: int), _col1 (type: string)
+                      sort order: ++
+                      Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
                       Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: int), _col1 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-                        Statistics: Num rows: 110 Data size: 1167 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           keys: KEY._col0 (type: int), KEY._col1 (type: string)

http://git-wip-us.apache.org/repos/asf/hive/blob/34adf31a/ql/src/test/results/clientpositive/perf/tez/query8.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query8.q.out b/ql/src/test/results/clientpositive/perf/tez/query8.q.out
index d9b82b4..26c7d8b 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query8.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query8.q.out
@@ -234,150 +234,150 @@ Stage-0
     limit:100
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_152]
-        Limit [LIM_151] (rows=100 width=88)
+      File Output Operator [FS_150]
+        Limit [LIM_149] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_150] (rows=348477374 width=88)
+          Select Operator [SEL_148] (rows=348477374 width=88)
             Output:["_col0","_col1"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_149]
-              Group By Operator [GBY_148] (rows=348477374 width=88)
+            SHUFFLE [RS_147]
+              Group By Operator [GBY_146] (rows=348477374 width=88)
                 Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
               <-Reducer 3 [SIMPLE_EDGE]
                 SHUFFLE [RS_57]
                   PartitionCols:_col0
                   Group By Operator [GBY_56] (rows=696954748 width=88)
                     Output:["_col0","_col1"],aggregations:["sum(_col2)"],keys:_col8
-                    Merge Join Operator [MERGEJOIN_119] (rows=696954748 width=88)
+                    Merge Join Operator [MERGEJOIN_117] (rows=696954748 width=88)
                       Conds:RS_52._col1=RS_53._col1(Inner),Output:["_col2","_col8"]
                     <-Reducer 12 [SIMPLE_EDGE]
                       SHUFFLE [RS_53]
                         PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_118] (rows=1874 width=1911)
-                          Conds:RS_139.substr(_col0, 1, 2)=RS_142.substr(_col2, 1, 2)(Inner),Output:["_col1","_col2"]
+                        Merge Join Operator [MERGEJOIN_116] (rows=1874 width=1911)
+                          Conds:RS_137.substr(_col0, 1, 2)=RS_140.substr(_col2, 1, 2)(Inner),Output:["_col1","_col2"]
                         <-Map 19 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_142]
+                          SHUFFLE [RS_140]
                             PartitionCols:substr(_col2, 1, 2)
-                            Select Operator [SEL_141] (rows=1704 width=1910)
+                            Select Operator [SEL_139] (rows=1704 width=1910)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_140] (rows=1704 width=1910)
+                              Filter Operator [FIL_138] (rows=1704 width=1910)
                                 predicate:(s_store_sk is not null and substr(s_zip, 1, 2) is not null)
                                 TableScan [TS_42] (rows=1704 width=1910)
                                   default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
                         <-Reducer 11 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_139]
+                          SHUFFLE [RS_137]
                             PartitionCols:substr(_col0, 1, 2)
-                            Select Operator [SEL_138] (rows=1 width=1014)
+                            Select Operator [SEL_136] (rows=1 width=1014)
                               Output:["_col0"]
-                              Filter Operator [FIL_137] (rows=1 width=1014)
+                              Filter Operator [FIL_135] (rows=1 width=1014)
                                 predicate:(_col1 = 2L)
-                                Group By Operator [GBY_136] (rows=6833333 width=1014)
+                                Group By Operator [GBY_134] (rows=6833333 width=1014)
                                   Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                                 <-Union 10 [SIMPLE_EDGE]
                                   <-Reducer 17 [CONTAINS] vectorized
-                                    Reduce Output Operator [RS_173]
+                                    Reduce Output Operator [RS_171]
                                       PartitionCols:_col0
-                                      Group By Operator [GBY_172] (rows=13666666 width=1014)
+                                      Group By Operator [GBY_170] (rows=13666666 width=1014)
                                         Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
-                                        Group By Operator [GBY_171] (rows=3666666 width=1014)
+                                        Group By Operator [GBY_169] (rows=3666666 width=1014)
                                           Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                                         <-Reducer 16 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_170]
+                                          SHUFFLE [RS_168]
                                             PartitionCols:_col0
-                                            Group By Operator [GBY_169] (rows=7333333 width=1014)
+                                            Group By Operator [GBY_167] (rows=7333333 width=1014)
                                               Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                                              Select Operator [SEL_168] (rows=7333333 width=1014)
+                                              Select Operator [SEL_166] (rows=7333333 width=1014)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_167] (rows=7333333 width=1014)
+                                                Filter Operator [FIL_165] (rows=7333333 width=1014)
                                                   predicate:(_col1 > 10L)
-                                                  Group By Operator [GBY_166] (rows=22000000 width=1014)
+                                                  Group By Operator [GBY_164] (rows=22000000 width=1014)
                                                     Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                                                   <-Reducer 15 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_25]
                                                       PartitionCols:_col0
                                                       Group By Operator [GBY_24] (rows=44000000 width=1014)
                                                         Output:["_col0","_col1"],aggregations:["count()"],keys:_col1
-                                                        Merge Join Operator [MERGEJOIN_117] (rows=44000000 width=1014)
-                                                          Conds:RS_162._col0=RS_165._col0(Inner),Output:["_col1"]
+                                                        Merge Join Operator [MERGEJOIN_115] (rows=44000000 width=1014)
+                                                          Conds:RS_160._col0=RS_163._col0(Inner),Output:["_col1"]
                                                         <-Map 14 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_162]
+                                                          SHUFFLE [RS_160]
                                                             PartitionCols:_col0
-                                                            Select Operator [SEL_161] (rows=40000000 width=1014)
+                                                            Select Operator [SEL_159] (rows=40000000 width=1014)
                                                               Output:["_col0","_col1"]
-                                                              Filter Operator [FIL_160] (rows=40000000 width=1014)
+                                                              Filter Operator [FIL_158] (rows=40000000 width=1014)
                                                                 predicate:(ca_address_sk is not null and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
                                                                 TableScan [TS_14] (rows=40000000 width=1014)
                                                                   default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_zip"]
                                                         <-Map 18 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_165]
+                                                          SHUFFLE [RS_163]
                                                             PartitionCols:_col0
-                                                            Select Operator [SEL_164] (rows=40000000 width=860)
+                                                            Select Operator [SEL_162] (rows=40000000 width=860)
                                                               Output:["_col0"]
-                                                              Filter Operator [FIL_163] (rows=40000000 width=860)
+                                                              Filter Operator [FIL_161] (rows=40000000 width=860)
                                                                 predicate:((c_preferred_cust_flag = 'Y') and c_current_addr_sk is not null)
                                                                 TableScan [TS_17] (rows=80000000 width=860)
                                                                   default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_current_addr_sk","c_preferred_cust_flag"]
                                   <-Reducer 9 [CONTAINS] vectorized
-                                    Reduce Output Operator [RS_159]
+                                    Reduce Output Operator [RS_157]
                                       PartitionCols:_col0
-                                      Group By Operator [GBY_158] (rows=13666666 width=1014)
+                                      Group By Operator [GBY_156] (rows=13666666 width=1014)
                                         Output:["_col0","_col1"],aggregations:["count(_col1)"],keys:_col0
-                                        Group By Operator [GBY_157] (rows=10000000 width=1014)
+                                        Group By Operator [GBY_155] (rows=10000000 width=1014)
                                           Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                                         <-Map 8 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_156]
+                                          SHUFFLE [RS_154]
                                             PartitionCols:_col0
-                                            Group By Operator [GBY_155] (rows=20000000 width=1014)
+                                            Group By Operator [GBY_153] (rows=20000000 width=1014)
                                               Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                                              Select Operator [SEL_154] (rows=20000000 width=1014)
+                                              Select Operator [SEL_152] (rows=20000000 width=1014)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_153] (rows=20000000 width=1014)
+                                                Filter Operator [FIL_151] (rows=20000000 width=1014)
                                                   predicate:((substr(ca_zip, 1, 5)) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779', '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797', '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068', '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053', '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425', '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835', '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360', '48649', '39050', '41793', '25002', '27413', '39736', 
 '47208', '16515', '94808', '57648', '15009', '80015', '42961', '63982', '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447', '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792', '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561', '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458', '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648', '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799', '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036', '88376', '45539', '35901', '19506', '65690'
 , '73957', '71850', '49231', '14276', '20005', '18384', '76615', '11635', '38177', '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121', '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560', '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619', '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010', '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705', '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928', '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047', '94167', '82564', '21156', '1579
 9', '86709', '37931', '74703', '83103', '23054', '70470', '72008', '49247', '91911', '69998', '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '25683', '61869', '51744', '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309', '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622', '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936', '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492') and substr(substr(ca_zip, 1, 5), 1, 2) is not null)
                                                   TableScan [TS_6] (rows=40000000 width=1014)
                                                     default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_zip"]
                     <-Reducer 2 [SIMPLE_EDGE]
                       SHUFFLE [RS_52]
                         PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_116] (rows=633595212 width=88)
-                          Conds:RS_147._col0=RS_130._col0(Inner),Output:["_col1","_col2"]
+                        Merge Join Operator [MERGEJOIN_114] (rows=633595212 width=88)
+                          Conds:RS_145._col0=RS_128._col0(Inner),Output:["_col1","_col2"]
                         <-Map 6 [SIMPLE_EDGE] vectorized
-                          PARTITION_ONLY_SHUFFLE [RS_130]
+                          PARTITION_ONLY_SHUFFLE [RS_128]
                             PartitionCols:_col0
-                            Select Operator [SEL_129] (rows=18262 width=1119)
+                            Select Operator [SEL_127] (rows=18262 width=1119)
                               Output:["_col0"]
-                              Filter Operator [FIL_128] (rows=18262 width=1119)
+                              Filter Operator [FIL_126] (rows=18262 width=1119)
                                 predicate:((d_qoy = 1) and (d_year = 2002) and d_date_sk is not null)
                                 TableScan [TS_3] (rows=73049 width=1119)
                                   default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_qoy"]
                         <-Map 1 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_147]
+                          SHUFFLE [RS_145]
                             PartitionCols:_col0
-                            Select Operator [SEL_146] (rows=575995635 width=88)
+                            Select Operator [SEL_144] (rows=575995635 width=88)
                               Output:["_col0","_col1","_col2"]
-                              Filter Operator [FIL_145] (rows=575995635 width=88)
+                              Filter Operator [FIL_143] (rows=575995635 width=88)
                                 predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_50_date_dim_d_date_sk_min) AND DynamicValue(RS_50_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_50_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_53_store_s_store_sk_min) AND DynamicValue(RS_53_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_53_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
                                 TableScan [TS_0] (rows=575995635 width=88)
                                   default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_net_profit"]
                                 <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_144]
-                                    Group By Operator [GBY_143] (rows=1 width=12)
+                                  BROADCAST [RS_142]
+                                    Group By Operator [GBY_141] (rows=1 width=12)
                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                     <-Reducer 12 [CUSTOM_SIMPLE_EDGE]
-                                      SHUFFLE [RS_93]
-                                        Group By Operator [GBY_92] (rows=1 width=12)
+                                      SHUFFLE [RS_91]
+                                        Group By Operator [GBY_90] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_91] (rows=1874 width=1911)
+                                          Select Operator [SEL_89] (rows=1874 width=1911)
                                             Output:["_col0"]
-                                             Please refer to the previous Merge Join Operator [MERGEJOIN_118]
+                                             Please refer to the previous Merge Join Operator [MERGEJOIN_116]
                                 <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                  BROADCAST [RS_135]
-                                    Group By Operator [GBY_134] (rows=1 width=12)
+                                  BROADCAST [RS_133]
+                                    Group By Operator [GBY_132] (rows=1 width=12)
                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                     <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_133]
-                                        Group By Operator [GBY_132] (rows=1 width=12)
+                                      PARTITION_ONLY_SHUFFLE [RS_131]
+                                        Group By Operator [GBY_130] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                          Select Operator [SEL_131] (rows=18262 width=1119)
+                                          Select Operator [SEL_129] (rows=18262 width=1119)
                                             Output:["_col0"]
-                                             Please refer to the previous Select Operator [SEL_129]
+                                             Please refer to the previous Select Operator [SEL_127]
 


[22/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
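
The WM* files in the hunks below are Thrift-generated; the visible changes are only renumbered temporaries (_listNNN, _elemNNN, _iNNN, _iterNNN) produced by regenerating the code, while the list read/write pattern itself is unchanged. For reference, a minimal sketch of that read pattern, assuming libthrift's TProtocol/TList API and the metastore's WMPool struct; the wrapper class and method name here are illustrative, not part of the generated code:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.WMPool;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    public class PoolListReader {
      // Mirrors the generated readers: read the list header, size the ArrayList,
      // deserialize each struct element with its own read(), then close the list.
      public static List<WMPool> readPools(TProtocol iprot) throws TException {
        TList header = iprot.readListBegin();
        List<WMPool> pools = new ArrayList<>(header.size);
        for (int i = 0; i < header.size; ++i) {
          WMPool elem = new WMPool();
          elem.read(iprot);
          pools.add(elem);
        }
        iprot.readListEnd();
        return pools;
      }
    }
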
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
index 4467479..f0c308d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMFullResourcePlan.java
@@ -755,14 +755,14 @@ import org.slf4j.LoggerFactory;
           case 2: // POOLS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list872 = iprot.readListBegin();
-                struct.pools = new ArrayList<WMPool>(_list872.size);
-                WMPool _elem873;
-                for (int _i874 = 0; _i874 < _list872.size; ++_i874)
+                org.apache.thrift.protocol.TList _list864 = iprot.readListBegin();
+                struct.pools = new ArrayList<WMPool>(_list864.size);
+                WMPool _elem865;
+                for (int _i866 = 0; _i866 < _list864.size; ++_i866)
                 {
-                  _elem873 = new WMPool();
-                  _elem873.read(iprot);
-                  struct.pools.add(_elem873);
+                  _elem865 = new WMPool();
+                  _elem865.read(iprot);
+                  struct.pools.add(_elem865);
                 }
                 iprot.readListEnd();
               }
@@ -774,14 +774,14 @@ import org.slf4j.LoggerFactory;
           case 3: // MAPPINGS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list875 = iprot.readListBegin();
-                struct.mappings = new ArrayList<WMMapping>(_list875.size);
-                WMMapping _elem876;
-                for (int _i877 = 0; _i877 < _list875.size; ++_i877)
+                org.apache.thrift.protocol.TList _list867 = iprot.readListBegin();
+                struct.mappings = new ArrayList<WMMapping>(_list867.size);
+                WMMapping _elem868;
+                for (int _i869 = 0; _i869 < _list867.size; ++_i869)
                 {
-                  _elem876 = new WMMapping();
-                  _elem876.read(iprot);
-                  struct.mappings.add(_elem876);
+                  _elem868 = new WMMapping();
+                  _elem868.read(iprot);
+                  struct.mappings.add(_elem868);
                 }
                 iprot.readListEnd();
               }
@@ -793,14 +793,14 @@ import org.slf4j.LoggerFactory;
           case 4: // TRIGGERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list878 = iprot.readListBegin();
-                struct.triggers = new ArrayList<WMTrigger>(_list878.size);
-                WMTrigger _elem879;
-                for (int _i880 = 0; _i880 < _list878.size; ++_i880)
+                org.apache.thrift.protocol.TList _list870 = iprot.readListBegin();
+                struct.triggers = new ArrayList<WMTrigger>(_list870.size);
+                WMTrigger _elem871;
+                for (int _i872 = 0; _i872 < _list870.size; ++_i872)
                 {
-                  _elem879 = new WMTrigger();
-                  _elem879.read(iprot);
-                  struct.triggers.add(_elem879);
+                  _elem871 = new WMTrigger();
+                  _elem871.read(iprot);
+                  struct.triggers.add(_elem871);
                 }
                 iprot.readListEnd();
               }
@@ -812,14 +812,14 @@ import org.slf4j.LoggerFactory;
           case 5: // POOL_TRIGGERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list881 = iprot.readListBegin();
-                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list881.size);
-                WMPoolTrigger _elem882;
-                for (int _i883 = 0; _i883 < _list881.size; ++_i883)
+                org.apache.thrift.protocol.TList _list873 = iprot.readListBegin();
+                struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list873.size);
+                WMPoolTrigger _elem874;
+                for (int _i875 = 0; _i875 < _list873.size; ++_i875)
                 {
-                  _elem882 = new WMPoolTrigger();
-                  _elem882.read(iprot);
-                  struct.poolTriggers.add(_elem882);
+                  _elem874 = new WMPoolTrigger();
+                  _elem874.read(iprot);
+                  struct.poolTriggers.add(_elem874);
                 }
                 iprot.readListEnd();
               }
@@ -850,9 +850,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(POOLS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.pools.size()));
-          for (WMPool _iter884 : struct.pools)
+          for (WMPool _iter876 : struct.pools)
           {
-            _iter884.write(oprot);
+            _iter876.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -863,9 +863,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(MAPPINGS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.mappings.size()));
-            for (WMMapping _iter885 : struct.mappings)
+            for (WMMapping _iter877 : struct.mappings)
             {
-              _iter885.write(oprot);
+              _iter877.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -877,9 +877,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TRIGGERS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size()));
-            for (WMTrigger _iter886 : struct.triggers)
+            for (WMTrigger _iter878 : struct.triggers)
             {
-              _iter886.write(oprot);
+              _iter878.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -891,9 +891,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(POOL_TRIGGERS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.poolTriggers.size()));
-            for (WMPoolTrigger _iter887 : struct.poolTriggers)
+            for (WMPoolTrigger _iter879 : struct.poolTriggers)
             {
-              _iter887.write(oprot);
+              _iter879.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -920,9 +920,9 @@ import org.slf4j.LoggerFactory;
       struct.plan.write(oprot);
       {
         oprot.writeI32(struct.pools.size());
-        for (WMPool _iter888 : struct.pools)
+        for (WMPool _iter880 : struct.pools)
         {
-          _iter888.write(oprot);
+          _iter880.write(oprot);
         }
       }
       BitSet optionals = new BitSet();
@@ -939,27 +939,27 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetMappings()) {
         {
           oprot.writeI32(struct.mappings.size());
-          for (WMMapping _iter889 : struct.mappings)
+          for (WMMapping _iter881 : struct.mappings)
           {
-            _iter889.write(oprot);
+            _iter881.write(oprot);
           }
         }
       }
       if (struct.isSetTriggers()) {
         {
           oprot.writeI32(struct.triggers.size());
-          for (WMTrigger _iter890 : struct.triggers)
+          for (WMTrigger _iter882 : struct.triggers)
           {
-            _iter890.write(oprot);
+            _iter882.write(oprot);
           }
         }
       }
       if (struct.isSetPoolTriggers()) {
         {
           oprot.writeI32(struct.poolTriggers.size());
-          for (WMPoolTrigger _iter891 : struct.poolTriggers)
+          for (WMPoolTrigger _iter883 : struct.poolTriggers)
           {
-            _iter891.write(oprot);
+            _iter883.write(oprot);
           }
         }
       }
@@ -972,56 +972,56 @@ import org.slf4j.LoggerFactory;
       struct.plan.read(iprot);
       struct.setPlanIsSet(true);
       {
-        org.apache.thrift.protocol.TList _list892 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.pools = new ArrayList<WMPool>(_list892.size);
-        WMPool _elem893;
-        for (int _i894 = 0; _i894 < _list892.size; ++_i894)
+        org.apache.thrift.protocol.TList _list884 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.pools = new ArrayList<WMPool>(_list884.size);
+        WMPool _elem885;
+        for (int _i886 = 0; _i886 < _list884.size; ++_i886)
         {
-          _elem893 = new WMPool();
-          _elem893.read(iprot);
-          struct.pools.add(_elem893);
+          _elem885 = new WMPool();
+          _elem885.read(iprot);
+          struct.pools.add(_elem885);
         }
       }
       struct.setPoolsIsSet(true);
       BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list895 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.mappings = new ArrayList<WMMapping>(_list895.size);
-          WMMapping _elem896;
-          for (int _i897 = 0; _i897 < _list895.size; ++_i897)
+          org.apache.thrift.protocol.TList _list887 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.mappings = new ArrayList<WMMapping>(_list887.size);
+          WMMapping _elem888;
+          for (int _i889 = 0; _i889 < _list887.size; ++_i889)
           {
-            _elem896 = new WMMapping();
-            _elem896.read(iprot);
-            struct.mappings.add(_elem896);
+            _elem888 = new WMMapping();
+            _elem888.read(iprot);
+            struct.mappings.add(_elem888);
           }
         }
         struct.setMappingsIsSet(true);
       }
       if (incoming.get(1)) {
         {
-          org.apache.thrift.protocol.TList _list898 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.triggers = new ArrayList<WMTrigger>(_list898.size);
-          WMTrigger _elem899;
-          for (int _i900 = 0; _i900 < _list898.size; ++_i900)
+          org.apache.thrift.protocol.TList _list890 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.triggers = new ArrayList<WMTrigger>(_list890.size);
+          WMTrigger _elem891;
+          for (int _i892 = 0; _i892 < _list890.size; ++_i892)
           {
-            _elem899 = new WMTrigger();
-            _elem899.read(iprot);
-            struct.triggers.add(_elem899);
+            _elem891 = new WMTrigger();
+            _elem891.read(iprot);
+            struct.triggers.add(_elem891);
           }
         }
         struct.setTriggersIsSet(true);
       }
       if (incoming.get(2)) {
         {
-          org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list901.size);
-          WMPoolTrigger _elem902;
-          for (int _i903 = 0; _i903 < _list901.size; ++_i903)
+          org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.poolTriggers = new ArrayList<WMPoolTrigger>(_list893.size);
+          WMPoolTrigger _elem894;
+          for (int _i895 = 0; _i895 < _list893.size; ++_i895)
           {
-            _elem902 = new WMPoolTrigger();
-            _elem902.read(iprot);
-            struct.poolTriggers.add(_elem902);
+            _elem894 = new WMPoolTrigger();
+            _elem894.read(iprot);
+            struct.poolTriggers.add(_elem894);
           }
         }
         struct.setPoolTriggersIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
index c6cb845..6eed84b 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetAllResourcePlanResponse.java
@@ -346,14 +346,14 @@ import org.slf4j.LoggerFactory;
           case 1: // RESOURCE_PLANS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list904 = iprot.readListBegin();
-                struct.resourcePlans = new ArrayList<WMResourcePlan>(_list904.size);
-                WMResourcePlan _elem905;
-                for (int _i906 = 0; _i906 < _list904.size; ++_i906)
+                org.apache.thrift.protocol.TList _list896 = iprot.readListBegin();
+                struct.resourcePlans = new ArrayList<WMResourcePlan>(_list896.size);
+                WMResourcePlan _elem897;
+                for (int _i898 = 0; _i898 < _list896.size; ++_i898)
                 {
-                  _elem905 = new WMResourcePlan();
-                  _elem905.read(iprot);
-                  struct.resourcePlans.add(_elem905);
+                  _elem897 = new WMResourcePlan();
+                  _elem897.read(iprot);
+                  struct.resourcePlans.add(_elem897);
                 }
                 iprot.readListEnd();
               }
@@ -380,9 +380,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(RESOURCE_PLANS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourcePlans.size()));
-            for (WMResourcePlan _iter907 : struct.resourcePlans)
+            for (WMResourcePlan _iter899 : struct.resourcePlans)
             {
-              _iter907.write(oprot);
+              _iter899.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -414,9 +414,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetResourcePlans()) {
         {
           oprot.writeI32(struct.resourcePlans.size());
-          for (WMResourcePlan _iter908 : struct.resourcePlans)
+          for (WMResourcePlan _iter900 : struct.resourcePlans)
           {
-            _iter908.write(oprot);
+            _iter900.write(oprot);
           }
         }
       }
@@ -428,14 +428,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.resourcePlans = new ArrayList<WMResourcePlan>(_list909.size);
-          WMResourcePlan _elem910;
-          for (int _i911 = 0; _i911 < _list909.size; ++_i911)
+          org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.resourcePlans = new ArrayList<WMResourcePlan>(_list901.size);
+          WMResourcePlan _elem902;
+          for (int _i903 = 0; _i903 < _list901.size; ++_i903)
           {
-            _elem910 = new WMResourcePlan();
-            _elem910.read(iprot);
-            struct.resourcePlans.add(_elem910);
+            _elem902 = new WMResourcePlan();
+            _elem902.read(iprot);
+            struct.resourcePlans.add(_elem902);
           }
         }
         struct.setResourcePlansIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
index 9eed335..53ea5d5 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMGetTriggersForResourePlanResponse.java
@@ -346,14 +346,14 @@ import org.slf4j.LoggerFactory;
           case 1: // TRIGGERS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
-                struct.triggers = new ArrayList<WMTrigger>(_list928.size);
-                WMTrigger _elem929;
-                for (int _i930 = 0; _i930 < _list928.size; ++_i930)
+                org.apache.thrift.protocol.TList _list920 = iprot.readListBegin();
+                struct.triggers = new ArrayList<WMTrigger>(_list920.size);
+                WMTrigger _elem921;
+                for (int _i922 = 0; _i922 < _list920.size; ++_i922)
                 {
-                  _elem929 = new WMTrigger();
-                  _elem929.read(iprot);
-                  struct.triggers.add(_elem929);
+                  _elem921 = new WMTrigger();
+                  _elem921.read(iprot);
+                  struct.triggers.add(_elem921);
                 }
                 iprot.readListEnd();
               }
@@ -380,9 +380,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TRIGGERS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.triggers.size()));
-            for (WMTrigger _iter931 : struct.triggers)
+            for (WMTrigger _iter923 : struct.triggers)
             {
-              _iter931.write(oprot);
+              _iter923.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -414,9 +414,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetTriggers()) {
         {
           oprot.writeI32(struct.triggers.size());
-          for (WMTrigger _iter932 : struct.triggers)
+          for (WMTrigger _iter924 : struct.triggers)
           {
-            _iter932.write(oprot);
+            _iter924.write(oprot);
           }
         }
       }
@@ -428,14 +428,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.triggers = new ArrayList<WMTrigger>(_list933.size);
-          WMTrigger _elem934;
-          for (int _i935 = 0; _i935 < _list933.size; ++_i935)
+          org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.triggers = new ArrayList<WMTrigger>(_list925.size);
+          WMTrigger _elem926;
+          for (int _i927 = 0; _i927 < _list925.size; ++_i927)
           {
-            _elem934 = new WMTrigger();
-            _elem934.read(iprot);
-            struct.triggers.add(_elem934);
+            _elem926 = new WMTrigger();
+            _elem926.read(iprot);
+            struct.triggers.add(_elem926);
           }
         }
         struct.setTriggersIsSet(true);

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
index ee9251c..0dd8a5e 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/WMValidateResourcePlanResponse.java
@@ -441,13 +441,13 @@ import org.slf4j.LoggerFactory;
           case 1: // ERRORS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list912 = iprot.readListBegin();
-                struct.errors = new ArrayList<String>(_list912.size);
-                String _elem913;
-                for (int _i914 = 0; _i914 < _list912.size; ++_i914)
+                org.apache.thrift.protocol.TList _list904 = iprot.readListBegin();
+                struct.errors = new ArrayList<String>(_list904.size);
+                String _elem905;
+                for (int _i906 = 0; _i906 < _list904.size; ++_i906)
                 {
-                  _elem913 = iprot.readString();
-                  struct.errors.add(_elem913);
+                  _elem905 = iprot.readString();
+                  struct.errors.add(_elem905);
                 }
                 iprot.readListEnd();
               }
@@ -459,13 +459,13 @@ import org.slf4j.LoggerFactory;
           case 2: // WARNINGS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list915 = iprot.readListBegin();
-                struct.warnings = new ArrayList<String>(_list915.size);
-                String _elem916;
-                for (int _i917 = 0; _i917 < _list915.size; ++_i917)
+                org.apache.thrift.protocol.TList _list907 = iprot.readListBegin();
+                struct.warnings = new ArrayList<String>(_list907.size);
+                String _elem908;
+                for (int _i909 = 0; _i909 < _list907.size; ++_i909)
                 {
-                  _elem916 = iprot.readString();
-                  struct.warnings.add(_elem916);
+                  _elem908 = iprot.readString();
+                  struct.warnings.add(_elem908);
                 }
                 iprot.readListEnd();
               }
@@ -492,9 +492,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(ERRORS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.errors.size()));
-            for (String _iter918 : struct.errors)
+            for (String _iter910 : struct.errors)
             {
-              oprot.writeString(_iter918);
+              oprot.writeString(_iter910);
             }
             oprot.writeListEnd();
           }
@@ -506,9 +506,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(WARNINGS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.warnings.size()));
-            for (String _iter919 : struct.warnings)
+            for (String _iter911 : struct.warnings)
             {
-              oprot.writeString(_iter919);
+              oprot.writeString(_iter911);
             }
             oprot.writeListEnd();
           }
@@ -543,18 +543,18 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetErrors()) {
         {
           oprot.writeI32(struct.errors.size());
-          for (String _iter920 : struct.errors)
+          for (String _iter912 : struct.errors)
           {
-            oprot.writeString(_iter920);
+            oprot.writeString(_iter912);
           }
         }
       }
       if (struct.isSetWarnings()) {
         {
           oprot.writeI32(struct.warnings.size());
-          for (String _iter921 : struct.warnings)
+          for (String _iter913 : struct.warnings)
           {
-            oprot.writeString(_iter921);
+            oprot.writeString(_iter913);
           }
         }
       }
@@ -566,26 +566,26 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list922 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.errors = new ArrayList<String>(_list922.size);
-          String _elem923;
-          for (int _i924 = 0; _i924 < _list922.size; ++_i924)
+          org.apache.thrift.protocol.TList _list914 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.errors = new ArrayList<String>(_list914.size);
+          String _elem915;
+          for (int _i916 = 0; _i916 < _list914.size; ++_i916)
           {
-            _elem923 = iprot.readString();
-            struct.errors.add(_elem923);
+            _elem915 = iprot.readString();
+            struct.errors.add(_elem915);
           }
         }
         struct.setErrorsIsSet(true);
       }
       if (incoming.get(1)) {
         {
-          org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.warnings = new ArrayList<String>(_list925.size);
-          String _elem926;
-          for (int _i927 = 0; _i927 < _list925.size; ++_i927)
+          org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.warnings = new ArrayList<String>(_list917.size);
+          String _elem918;
+          for (int _i919 = 0; _i919 < _list917.size; ++_i919)
           {
-            _elem926 = iprot.readString();
-            struct.warnings.add(_elem926);
+            _elem918 = iprot.readString();
+            struct.warnings.add(_elem918);
           }
         }
         struct.setWarningsIsSet(true);


[09/48] hive git commit: HIVE-20090: Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
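
This change extends the semijoin reduction filters already visible in the query8 plan above: the dimension side computes min/max bounds and a Bloom filter over its join keys (the min/max/bloom_filter aggregations, e.g. GBY_141 and GBY_130), broadcasts them, and the fact-table scan applies them as BETWEEN DynamicValue(...) AND DynamicValue(...) plus in_bloom_filter(...) predicates; the query64 plan below picks up additional BROADCAST_EDGEs from newly discovered opportunities. A minimal sketch of the idea using Guava's BloomFilter, as an illustration of the technique rather than Hive's implementation (key values are made up):

    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;

    public class SemiJoinReductionSketch {
      public static void main(String[] args) {
        // Dimension side: collect the surviving join keys, tracking min/max
        // and populating a Bloom filter.
        long[] dimKeys = {10L, 42L, 97L};
        long min = Long.MAX_VALUE, max = Long.MIN_VALUE;
        BloomFilter<Long> bloom = BloomFilter.create(Funnels.longFunnel(), 1_000_000);
        for (long k : dimKeys) {
          min = Math.min(min, k);
          max = Math.max(max, k);
          bloom.put(k);
        }

        // Fact side: before the join, skip rows whose key cannot possibly match.
        long[] factKeys = {5L, 42L, 500L};
        for (long k : factKeys) {
          boolean mightMatch = k >= min && k <= max && bloom.mightContain(k);
          System.out.println(k + " -> " + (mightMatch ? "join" : "skip"));
        }
      }
    }
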
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query64.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query64.q.out b/ql/src/test/results/clientpositive/perf/tez/query64.q.out
index a673b9f..6d3edd3 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query64.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query64.q.out
@@ -237,31 +237,32 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 40 <- Reducer 23 (BROADCAST_EDGE), Reducer 34 (BROADCAST_EDGE), Reducer 38 (BROADCAST_EDGE), Reducer 43 (BROADCAST_EDGE), Reducer 47 (BROADCAST_EDGE), Reducer 52 (BROADCAST_EDGE), Reducer 58 (BROADCAST_EDGE), Reducer 61 (BROADCAST_EDGE), Reducer 62 (BROADCAST_EDGE), Reducer 66 (BROADCAST_EDGE)
-Map 49 <- Reducer 55 (BROADCAST_EDGE), Reducer 56 (BROADCAST_EDGE), Reducer 61 (BROADCAST_EDGE)
-Map 68 <- Reducer 31 (BROADCAST_EDGE), Reducer 35 (BROADCAST_EDGE), Reducer 39 (BROADCAST_EDGE), Reducer 45 (BROADCAST_EDGE), Reducer 48 (BROADCAST_EDGE), Reducer 53 (BROADCAST_EDGE), Reducer 59 (BROADCAST_EDGE), Reducer 63 (BROADCAST_EDGE), Reducer 64 (BROADCAST_EDGE), Reducer 67 (BROADCAST_EDGE)
+Map 40 <- Reducer 23 (BROADCAST_EDGE), Reducer 34 (BROADCAST_EDGE), Reducer 38 (BROADCAST_EDGE), Reducer 43 (BROADCAST_EDGE), Reducer 47 (BROADCAST_EDGE), Reducer 52 (BROADCAST_EDGE), Reducer 62 (BROADCAST_EDGE), Reducer 65 (BROADCAST_EDGE), Reducer 66 (BROADCAST_EDGE), Reducer 70 (BROADCAST_EDGE)
+Map 49 <- Reducer 43 (BROADCAST_EDGE), Reducer 54 (BROADCAST_EDGE), Reducer 55 (BROADCAST_EDGE), Reducer 65 (BROADCAST_EDGE)
+Map 72 <- Reducer 31 (BROADCAST_EDGE), Reducer 35 (BROADCAST_EDGE), Reducer 39 (BROADCAST_EDGE), Reducer 43 (BROADCAST_EDGE), Reducer 45 (BROADCAST_EDGE), Reducer 48 (BROADCAST_EDGE), Reducer 58 (BROADCAST_EDGE), Reducer 63 (BROADCAST_EDGE), Reducer 67 (BROADCAST_EDGE), Reducer 68 (BROADCAST_EDGE), Reducer 71 (BROADCAST_EDGE)
+Map 73 <- Reducer 45 (BROADCAST_EDGE), Reducer 59 (BROADCAST_EDGE), Reducer 60 (BROADCAST_EDGE), Reducer 67 (BROADCAST_EDGE)
 Reducer 10 <- Reducer 14 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
 Reducer 11 <- Reducer 10 (SIMPLE_EDGE)
 Reducer 12 <- Reducer 30 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
-Reducer 13 <- Map 65 (SIMPLE_EDGE), Reducer 12 (SIMPLE_EDGE)
+Reducer 13 <- Map 69 (SIMPLE_EDGE), Reducer 12 (SIMPLE_EDGE)
 Reducer 14 <- Reducer 13 (SIMPLE_EDGE)
 Reducer 16 <- Map 15 (SIMPLE_EDGE), Reducer 41 (SIMPLE_EDGE)
 Reducer 17 <- Map 46 (SIMPLE_EDGE), Reducer 16 (SIMPLE_EDGE)
 Reducer 18 <- Reducer 17 (SIMPLE_EDGE), Reducer 33 (SIMPLE_EDGE)
 Reducer 19 <- Reducer 18 (SIMPLE_EDGE), Reducer 51 (ONE_TO_ONE_EDGE)
 Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 15 (SIMPLE_EDGE)
-Reducer 20 <- Map 57 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE)
+Reducer 20 <- Map 61 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE)
 Reducer 21 <- Map 37 (SIMPLE_EDGE), Reducer 20 (SIMPLE_EDGE)
-Reducer 22 <- Map 60 (SIMPLE_EDGE), Reducer 21 (SIMPLE_EDGE)
+Reducer 22 <- Map 64 (SIMPLE_EDGE), Reducer 21 (SIMPLE_EDGE)
 Reducer 23 <- Map 15 (CUSTOM_SIMPLE_EDGE)
 Reducer 24 <- Map 15 (SIMPLE_EDGE), Reducer 44 (SIMPLE_EDGE)
 Reducer 25 <- Map 46 (SIMPLE_EDGE), Reducer 24 (SIMPLE_EDGE)
 Reducer 26 <- Reducer 25 (SIMPLE_EDGE), Reducer 33 (SIMPLE_EDGE)
-Reducer 27 <- Reducer 26 (SIMPLE_EDGE), Reducer 51 (ONE_TO_ONE_EDGE)
-Reducer 28 <- Map 57 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE)
+Reducer 27 <- Reducer 26 (SIMPLE_EDGE), Reducer 57 (ONE_TO_ONE_EDGE)
+Reducer 28 <- Map 61 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE)
 Reducer 29 <- Map 37 (SIMPLE_EDGE), Reducer 28 (SIMPLE_EDGE)
 Reducer 3 <- Map 15 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
-Reducer 30 <- Map 60 (SIMPLE_EDGE), Reducer 29 (SIMPLE_EDGE)
+Reducer 30 <- Map 64 (SIMPLE_EDGE), Reducer 29 (SIMPLE_EDGE)
 Reducer 31 <- Map 15 (CUSTOM_SIMPLE_EDGE)
 Reducer 33 <- Map 32 (SIMPLE_EDGE), Map 36 (SIMPLE_EDGE)
 Reducer 34 <- Reducer 33 (CUSTOM_SIMPLE_EDGE)
@@ -271,28 +272,32 @@ Reducer 39 <- Map 37 (CUSTOM_SIMPLE_EDGE)
 Reducer 4 <- Reducer 3 (SIMPLE_EDGE), Reducer 33 (SIMPLE_EDGE)
 Reducer 41 <- Map 40 (SIMPLE_EDGE), Map 42 (SIMPLE_EDGE)
 Reducer 43 <- Map 42 (CUSTOM_SIMPLE_EDGE)
-Reducer 44 <- Map 42 (SIMPLE_EDGE), Map 68 (SIMPLE_EDGE)
+Reducer 44 <- Map 42 (SIMPLE_EDGE), Map 72 (SIMPLE_EDGE)
 Reducer 45 <- Map 42 (CUSTOM_SIMPLE_EDGE)
 Reducer 47 <- Map 46 (CUSTOM_SIMPLE_EDGE)
 Reducer 48 <- Map 46 (CUSTOM_SIMPLE_EDGE)
 Reducer 5 <- Map 37 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE)
-Reducer 50 <- Map 49 (SIMPLE_EDGE), Map 54 (SIMPLE_EDGE)
+Reducer 50 <- Map 49 (SIMPLE_EDGE), Map 53 (SIMPLE_EDGE)
 Reducer 51 <- Reducer 50 (SIMPLE_EDGE)
 Reducer 52 <- Reducer 51 (CUSTOM_SIMPLE_EDGE)
-Reducer 53 <- Reducer 51 (CUSTOM_SIMPLE_EDGE)
-Reducer 55 <- Map 54 (CUSTOM_SIMPLE_EDGE)
-Reducer 56 <- Map 54 (CUSTOM_SIMPLE_EDGE)
-Reducer 58 <- Map 57 (CUSTOM_SIMPLE_EDGE)
-Reducer 59 <- Map 57 (CUSTOM_SIMPLE_EDGE)
-Reducer 6 <- Map 65 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE)
-Reducer 61 <- Map 60 (CUSTOM_SIMPLE_EDGE)
-Reducer 62 <- Map 60 (CUSTOM_SIMPLE_EDGE)
-Reducer 63 <- Map 60 (CUSTOM_SIMPLE_EDGE)
-Reducer 64 <- Map 60 (CUSTOM_SIMPLE_EDGE)
-Reducer 66 <- Map 65 (CUSTOM_SIMPLE_EDGE)
-Reducer 67 <- Map 65 (CUSTOM_SIMPLE_EDGE)
+Reducer 54 <- Map 53 (CUSTOM_SIMPLE_EDGE)
+Reducer 55 <- Map 53 (CUSTOM_SIMPLE_EDGE)
+Reducer 56 <- Map 53 (SIMPLE_EDGE), Map 73 (SIMPLE_EDGE)
+Reducer 57 <- Reducer 56 (SIMPLE_EDGE)
+Reducer 58 <- Reducer 57 (CUSTOM_SIMPLE_EDGE)
+Reducer 59 <- Map 53 (CUSTOM_SIMPLE_EDGE)
+Reducer 6 <- Map 69 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE)
+Reducer 60 <- Map 53 (CUSTOM_SIMPLE_EDGE)
+Reducer 62 <- Map 61 (CUSTOM_SIMPLE_EDGE)
+Reducer 63 <- Map 61 (CUSTOM_SIMPLE_EDGE)
+Reducer 65 <- Map 64 (CUSTOM_SIMPLE_EDGE)
+Reducer 66 <- Map 64 (CUSTOM_SIMPLE_EDGE)
+Reducer 67 <- Map 64 (CUSTOM_SIMPLE_EDGE)
+Reducer 68 <- Map 64 (CUSTOM_SIMPLE_EDGE)
 Reducer 7 <- Reducer 22 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE)
-Reducer 8 <- Map 65 (SIMPLE_EDGE), Reducer 7 (SIMPLE_EDGE)
+Reducer 70 <- Map 69 (CUSTOM_SIMPLE_EDGE)
+Reducer 71 <- Map 69 (CUSTOM_SIMPLE_EDGE)
+Reducer 8 <- Map 69 (SIMPLE_EDGE), Reducer 7 (SIMPLE_EDGE)
 Reducer 9 <- Reducer 8 (SIMPLE_EDGE)
 
 Stage-0
@@ -300,8 +305,8 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 11 vectorized
-      File Output Operator [FS_1230]
-        Select Operator [SEL_1229] (rows=273897192 width=88)
+      File Output Operator [FS_1283]
+        Select Operator [SEL_1282] (rows=273897192 width=88)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18","_col19","_col20"]
         <-Reducer 10 [SIMPLE_EDGE]
           SHUFFLE [RS_259]
@@ -309,14 +314,14 @@ Stage-0
               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17","_col18"]
               Filter Operator [FIL_257] (rows=273897192 width=88)
                 predicate:(_col19 <= _col12)
-                Merge Join Operator [MERGEJOIN_1055] (rows=821691577 width=88)
-                  Conds:RS_1202._col2, _col1, _col3=RS_1228._col1, _col0, _col2(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col19","_col20","_col21","_col22"]
+                Merge Join Operator [MERGEJOIN_1087] (rows=821691577 width=88)
+                  Conds:RS_1239._col2, _col1, _col3=RS_1281._col1, _col0, _col2(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col19","_col20","_col21","_col22"]
                 <-Reducer 14 [SIMPLE_EDGE] vectorized
-                  SHUFFLE [RS_1228]
+                  SHUFFLE [RS_1281]
                     PartitionCols:_col1, _col0, _col2
-                    Select Operator [SEL_1227] (rows=746992327 width=88)
+                    Select Operator [SEL_1280] (rows=746992327 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                      Group By Operator [GBY_1226] (rows=746992327 width=88)
+                      Group By Operator [GBY_1279] (rows=746992327 width=88)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"],aggregations:["count(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4, KEY._col5, KEY._col6, KEY._col7, KEY._col8, KEY._col9, KEY._col10, KEY._col11, KEY._col12, KEY._col13
                       <-Reducer 13 [SIMPLE_EDGE]
                         SHUFFLE [RS_251]
@@ -327,102 +332,102 @@ Stage-0
                               Output:["_col7","_col9","_col14","_col15","_col16","_col17","_col23","_col24","_col25","_col26","_col28","_col29","_col43","_col44","_col45","_col46","_col49"]
                               Filter Operator [FIL_248] (rows=1493984654 width=88)
                                 predicate:(_col56 <> _col19)
-                                Merge Join Operator [MERGEJOIN_1054] (rows=1493984654 width=88)
-                                  Conds:RS_245._col37=RS_1097._col0(Inner),Output:["_col7","_col9","_col14","_col15","_col16","_col17","_col19","_col23","_col24","_col25","_col26","_col28","_col29","_col43","_col44","_col45","_col46","_col49","_col56"]
-                                <-Map 65 [SIMPLE_EDGE] vectorized
-                                  SHUFFLE [RS_1097]
+                                Merge Join Operator [MERGEJOIN_1086] (rows=1493984654 width=88)
+                                  Conds:RS_245._col37=RS_1129._col0(Inner),Output:["_col7","_col9","_col14","_col15","_col16","_col17","_col19","_col23","_col24","_col25","_col26","_col28","_col29","_col43","_col44","_col45","_col46","_col49","_col56"]
+                                <-Map 69 [SIMPLE_EDGE] vectorized
+                                  SHUFFLE [RS_1129]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_1093] (rows=1861800 width=385)
+                                    Select Operator [SEL_1125] (rows=1861800 width=385)
                                       Output:["_col0","_col1"]
-                                      Filter Operator [FIL_1092] (rows=1861800 width=385)
+                                      Filter Operator [FIL_1124] (rows=1861800 width=385)
                                         predicate:cd_demo_sk is not null
                                         TableScan [TS_97] (rows=1861800 width=385)
                                           default@customer_demographics,cd1,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status"]
                                 <-Reducer 12 [SIMPLE_EDGE]
                                   SHUFFLE [RS_245]
                                     PartitionCols:_col37
-                                    Merge Join Operator [MERGEJOIN_1053] (rows=1358167838 width=88)
+                                    Merge Join Operator [MERGEJOIN_1085] (rows=1358167838 width=88)
                                       Conds:RS_242._col0=RS_243._col16(Inner),Output:["_col7","_col9","_col14","_col15","_col16","_col17","_col19","_col23","_col24","_col25","_col26","_col28","_col29","_col37","_col43","_col44","_col45","_col46","_col49"]
                                     <-Reducer 6 [SIMPLE_EDGE]
                                       SHUFFLE [RS_242]
                                         PartitionCols:_col0
-                                        Merge Join Operator [MERGEJOIN_1024] (rows=128840811 width=860)
-                                          Conds:RS_112._col1=RS_1096._col0(Inner),Output:["_col0","_col7","_col9","_col14","_col15","_col16","_col17","_col19"]
-                                        <-Map 65 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_1096]
+                                        Merge Join Operator [MERGEJOIN_1056] (rows=128840811 width=860)
+                                          Conds:RS_112._col1=RS_1128._col0(Inner),Output:["_col0","_col7","_col9","_col14","_col15","_col16","_col17","_col19"]
+                                        <-Map 69 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_1128]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_1093]
+                                             Please refer to the previous Select Operator [SEL_1125]
                                         <-Reducer 5 [SIMPLE_EDGE]
                                           SHUFFLE [RS_112]
                                             PartitionCols:_col1
-                                            Merge Join Operator [MERGEJOIN_1023] (rows=117128008 width=860)
-                                              Conds:RS_109._col3=RS_1083._col0(Inner),Output:["_col0","_col1","_col7","_col9","_col14","_col15","_col16","_col17"]
+                                            Merge Join Operator [MERGEJOIN_1055] (rows=117128008 width=860)
+                                              Conds:RS_109._col3=RS_1115._col0(Inner),Output:["_col0","_col1","_col7","_col9","_col14","_col15","_col16","_col17"]
                                             <-Map 37 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_1083]
+                                              SHUFFLE [RS_1115]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_1082] (rows=40000000 width=1014)
+                                                Select Operator [SEL_1114] (rows=40000000 width=1014)
                                                   Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                  Filter Operator [FIL_1081] (rows=40000000 width=1014)
+                                                  Filter Operator [FIL_1113] (rows=40000000 width=1014)
                                                     predicate:ca_address_sk is not null
                                                     TableScan [TS_19] (rows=40000000 width=1014)
                                                       default@customer_address,ad2,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_street_number","ca_street_name","ca_city","ca_zip"]
                                             <-Reducer 4 [SIMPLE_EDGE]
                                               SHUFFLE [RS_109]
                                                 PartitionCols:_col3
-                                                Merge Join Operator [MERGEJOIN_1022] (rows=106480005 width=860)
+                                                Merge Join Operator [MERGEJOIN_1054] (rows=106480005 width=860)
                                                   Conds:RS_106._col2=RS_107._col0(Inner),Output:["_col0","_col1","_col3","_col7","_col9"]
                                                 <-Reducer 33 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_107]
                                                     PartitionCols:_col0
-                                                    Merge Join Operator [MERGEJOIN_1021] (rows=7920 width=107)
-                                                      Conds:RS_1077._col1=RS_1080._col0(Inner),Output:["_col0"]
+                                                    Merge Join Operator [MERGEJOIN_1053] (rows=7920 width=107)
+                                                      Conds:RS_1109._col1=RS_1112._col0(Inner),Output:["_col0"]
                                                     <-Map 32 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1077]
+                                                      SHUFFLE [RS_1109]
                                                         PartitionCols:_col1
-                                                        Select Operator [SEL_1076] (rows=7200 width=107)
+                                                        Select Operator [SEL_1108] (rows=7200 width=107)
                                                           Output:["_col0","_col1"]
-                                                          Filter Operator [FIL_1075] (rows=7200 width=107)
+                                                          Filter Operator [FIL_1107] (rows=7200 width=107)
                                                             predicate:(hd_demo_sk is not null and hd_income_band_sk is not null)
                                                             TableScan [TS_9] (rows=7200 width=107)
                                                               default@household_demographics,hd2,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_income_band_sk"]
                                                     <-Map 36 [SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_1080]
+                                                      SHUFFLE [RS_1112]
                                                         PartitionCols:_col0
-                                                        Select Operator [SEL_1079] (rows=20 width=12)
+                                                        Select Operator [SEL_1111] (rows=20 width=12)
                                                           Output:["_col0"]
-                                                          Filter Operator [FIL_1078] (rows=20 width=12)
+                                                          Filter Operator [FIL_1110] (rows=20 width=12)
                                                             predicate:ib_income_band_sk is not null
                                                             TableScan [TS_12] (rows=20 width=12)
                                                               default@income_band,ib2,Tbl:COMPLETE,Col:NONE,Output:["ib_income_band_sk"]
                                                 <-Reducer 3 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_106]
                                                     PartitionCols:_col2
-                                                    Merge Join Operator [MERGEJOIN_1020] (rows=96800003 width=860)
-                                                      Conds:RS_103._col4=RS_1066._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col7","_col9"]
+                                                    Merge Join Operator [MERGEJOIN_1052] (rows=96800003 width=860)
+                                                      Conds:RS_103._col4=RS_1098._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col7","_col9"]
                                                     <-Map 15 [SIMPLE_EDGE] vectorized
-                                                      PARTITION_ONLY_SHUFFLE [RS_1066]
+                                                      PARTITION_ONLY_SHUFFLE [RS_1098]
                                                         PartitionCols:_col0
-                                                        Select Operator [SEL_1062] (rows=73049 width=1119)
+                                                        Select Operator [SEL_1094] (rows=73049 width=1119)
                                                           Output:["_col0","_col1"]
-                                                          Filter Operator [FIL_1059] (rows=73049 width=1119)
+                                                          Filter Operator [FIL_1091] (rows=73049 width=1119)
                                                             predicate:d_date_sk is not null
                                                             TableScan [TS_3] (rows=73049 width=1119)
                                                               default@date_dim,d2,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
                                                     <-Reducer 2 [SIMPLE_EDGE]
                                                       SHUFFLE [RS_103]
                                                         PartitionCols:_col4
-                                                        Merge Join Operator [MERGEJOIN_1019] (rows=88000001 width=860)
-                                                          Conds:RS_1058._col5=RS_1065._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col7"]
+                                                        Merge Join Operator [MERGEJOIN_1051] (rows=88000001 width=860)
+                                                          Conds:RS_1090._col5=RS_1097._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col7"]
                                                         <-Map 15 [SIMPLE_EDGE] vectorized
-                                                          PARTITION_ONLY_SHUFFLE [RS_1065]
+                                                          PARTITION_ONLY_SHUFFLE [RS_1097]
                                                             PartitionCols:_col0
-                                                             Please refer to the previous Select Operator [SEL_1062]
+                                                             Please refer to the previous Select Operator [SEL_1094]
                                                         <-Map 1 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_1058]
+                                                          SHUFFLE [RS_1090]
                                                             PartitionCols:_col5
-                                                            Select Operator [SEL_1057] (rows=80000000 width=860)
+                                                            Select Operator [SEL_1089] (rows=80000000 width=860)
                                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                                              Filter Operator [FIL_1056] (rows=80000000 width=860)
+                                                              Filter Operator [FIL_1088] (rows=80000000 width=860)
                                                                 predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null and c_customer_sk is not null and c_first_sales_date_sk is not null and c_first_shipto_date_sk is not null)
                                                                 TableScan [TS_0] (rows=80000000 width=860)
                                                                   default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk","c_first_shipto_date_sk","c_first_sales_date_sk"]
@@ -431,291 +436,297 @@ Stage-0
                                         PartitionCols:_col16
                                         Select Operator [SEL_223] (rows=1234698008 width=88)
                                           Output:["_col3","_col4","_col5","_col6","_col8","_col9","_col16","_col17","_col23","_col24","_col25","_col26","_col29"]
-                                          Merge Join Operator [MERGEJOIN_1052] (rows=1234698008 width=88)
-                                            Conds:RS_220._col5, _col12=RS_1149._col0, _col1(Inner),Output:["_col6","_col7","_col13","_col14","_col15","_col16","_col19","_col26","_col27","_col29","_col30","_col31","_col32"]
-                                          <-Map 60 [SIMPLE_EDGE] vectorized
-                                            PARTITION_ONLY_SHUFFLE [RS_1149]
+                                          Merge Join Operator [MERGEJOIN_1084] (rows=1234698008 width=88)
+                                            Conds:RS_220._col5, _col12=RS_1190._col0, _col1(Inner),Output:["_col6","_col7","_col13","_col14","_col15","_col16","_col19","_col26","_col27","_col29","_col30","_col31","_col32"]
+                                          <-Map 64 [SIMPLE_EDGE] vectorized
+                                            PARTITION_ONLY_SHUFFLE [RS_1190]
                                               PartitionCols:_col0, _col1
-                                              Select Operator [SEL_1145] (rows=57591150 width=77)
+                                              Select Operator [SEL_1186] (rows=57591150 width=77)
                                                 Output:["_col0","_col1"]
-                                                Filter Operator [FIL_1144] (rows=57591150 width=77)
+                                                Filter Operator [FIL_1185] (rows=57591150 width=77)
                                                   predicate:(sr_item_sk is not null and sr_ticket_number is not null)
                                                   TableScan [TS_75] (rows=57591150 width=77)
                                                     default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number"]
                                           <-Reducer 29 [SIMPLE_EDGE]
                                             SHUFFLE [RS_220]
                                               PartitionCols:_col5, _col12
-                                              Merge Join Operator [MERGEJOIN_1051] (rows=1122452711 width=88)
-                                                Conds:RS_217._col9=RS_1086._col0(Inner),Output:["_col5","_col6","_col7","_col12","_col13","_col14","_col15","_col16","_col19","_col26","_col27","_col29","_col30","_col31","_col32"]
+                                              Merge Join Operator [MERGEJOIN_1083] (rows=1122452711 width=88)
+                                                Conds:RS_217._col9=RS_1118._col0(Inner),Output:["_col5","_col6","_col7","_col12","_col13","_col14","_col15","_col16","_col19","_col26","_col27","_col29","_col30","_col31","_col32"]
                                               <-Map 37 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_1086]
+                                                SHUFFLE [RS_1118]
                                                   PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_1082]
+                                                   Please refer to the previous Select Operator [SEL_1114]
                                               <-Reducer 28 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_217]
                                                   PartitionCols:_col9
-                                                  Merge Join Operator [MERGEJOIN_1050] (rows=1020411534 width=88)
-                                                    Conds:RS_214._col10=RS_1183._col0(Inner),Output:["_col5","_col6","_col7","_col9","_col12","_col13","_col14","_col15","_col16","_col19","_col26","_col27"]
-                                                  <-Map 57 [SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_1183]
+                                                  Merge Join Operator [MERGEJOIN_1082] (rows=1020411534 width=88)
+                                                    Conds:RS_214._col10=RS_1220._col0(Inner),Output:["_col5","_col6","_col7","_col9","_col12","_col13","_col14","_col15","_col16","_col19","_col26","_col27"]
+                                                  <-Map 61 [SIMPLE_EDGE] vectorized
+                                                    PARTITION_ONLY_SHUFFLE [RS_1220]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_1180] (rows=1704 width=1910)
+                                                      Select Operator [SEL_1217] (rows=1704 width=1910)
                                                         Output:["_col0","_col1","_col2"]
-                                                        Filter Operator [FIL_1179] (rows=1704 width=1910)
+                                                        Filter Operator [FIL_1216] (rows=1704 width=1910)
                                                           predicate:(s_store_name is not null and s_store_sk is not null and s_zip is not null)
                                                           TableScan [TS_69] (rows=1704 width=1910)
                                                             default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_name","s_zip"]
                                                   <-Reducer 27 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_214]
                                                       PartitionCols:_col10
-                                                      Merge Join Operator [MERGEJOIN_1049] (rows=927646829 width=88)
-                                                        Conds:RS_211._col5=RS_1171._col0(Inner),Output:["_col5","_col6","_col7","_col9","_col10","_col12","_col13","_col14","_col15","_col16","_col19"]
-                                                      <-Reducer 51 [ONE_TO_ONE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_1171]
+                                                      Merge Join Operator [MERGEJOIN_1081] (rows=927646829 width=88)
+                                                        Conds:RS_211._col5=RS_1262._col0(Inner),Output:["_col5","_col6","_col7","_col9","_col10","_col12","_col13","_col14","_col15","_col16","_col19"]
+                                                      <-Reducer 57 [ONE_TO_ONE_EDGE] vectorized
+                                                        PARTITION_ONLY_SHUFFLE [RS_1262]
                                                           PartitionCols:_col0
-                                                          Select Operator [SEL_1168] (rows=52798137 width=135)
+                                                          Select Operator [SEL_1261] (rows=52798137 width=135)
                                                             Output:["_col0"]
-                                                            Filter Operator [FIL_1167] (rows=52798137 width=135)
+                                                            Filter Operator [FIL_1260] (rows=52798137 width=135)
                                                               predicate:(_col1 > (2 * _col2))
-                                                              Group By Operator [GBY_1166] (rows=158394413 width=135)
+                                                              Group By Operator [GBY_1259] (rows=158394413 width=135)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
-                                                              <-Reducer 50 [SIMPLE_EDGE]
-                                                                SHUFFLE [RS_65]
+                                                              <-Reducer 56 [SIMPLE_EDGE]
+                                                                SHUFFLE [RS_192]
                                                                   PartitionCols:_col0
-                                                                  Group By Operator [GBY_64] (rows=316788826 width=135)
+                                                                  Group By Operator [GBY_191] (rows=316788826 width=135)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col1)","sum(_col2)"],keys:_col0
-                                                                    Select Operator [SEL_62] (rows=316788826 width=135)
+                                                                    Select Operator [SEL_189] (rows=316788826 width=135)
                                                                       Output:["_col0","_col1","_col2"]
-                                                                      Merge Join Operator [MERGEJOIN_1029] (rows=316788826 width=135)
-                                                                        Conds:RS_1165._col0, _col1=RS_1133._col0, _col1(Inner),Output:["_col0","_col2","_col5","_col6","_col7"]
-                                                                      <-Map 54 [SIMPLE_EDGE] vectorized
-                                                                        PARTITION_ONLY_SHUFFLE [RS_1133]
+                                                                      Merge Join Operator [MERGEJOIN_1079] (rows=316788826 width=135)
+                                                                        Conds:RS_1258._col0, _col1=RS_1170._col0, _col1(Inner),Output:["_col0","_col2","_col5","_col6","_col7"]
+                                                                      <-Map 53 [SIMPLE_EDGE] vectorized
+                                                                        PARTITION_ONLY_SHUFFLE [RS_1170]
                                                                           PartitionCols:_col0, _col1
-                                                                          Select Operator [SEL_1132] (rows=28798881 width=106)
+                                                                          Select Operator [SEL_1166] (rows=28798881 width=106)
                                                                             Output:["_col0","_col1","_col2","_col3","_col4"]
-                                                                            Filter Operator [FIL_1131] (rows=28798881 width=106)
+                                                                            Filter Operator [FIL_1165] (rows=28798881 width=106)
                                                                               predicate:(cr_item_sk is not null and cr_order_number is not null)
                                                                               TableScan [TS_56] (rows=28798881 width=106)
                                                                                 default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_refunded_cash","cr_reversed_charge","cr_store_credit"]
-                                                                      <-Map 49 [SIMPLE_EDGE] vectorized
-                                                                        SHUFFLE [RS_1165]
+                                                                      <-Map 73 [SIMPLE_EDGE] vectorized
+                                                                        SHUFFLE [RS_1258]
                                                                           PartitionCols:_col0, _col1
-                                                                          Select Operator [SEL_1164] (rows=287989836 width=135)
+                                                                          Select Operator [SEL_1257] (rows=287989836 width=135)
                                                                             Output:["_col0","_col1","_col2"]
-                                                                            Filter Operator [FIL_1163] (rows=287989836 width=135)
-                                                                              predicate:((cs_item_sk BETWEEN DynamicValue(RS_60_catalog_returns_cr_item_sk_min) AND DynamicValue(RS_60_catalog_returns_cr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_60_catalog_returns_cr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_94_store_returns_sr_item_sk_min) AND DynamicValue(RS_94_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_94_store_returns_sr_item_sk_bloom_filter))) and (cs_order_number BETWEEN DynamicValue(RS_60_catalog_returns_cr_order_number_min) AND DynamicValue(RS_60_catalog_returns_cr_order_number_max) and in_bloom_filter(cs_order_number, DynamicValue(RS_60_catalog_returns_cr_order_number_bloom_filter))) and cs_item_sk is not null and cs_order_number is not null)
-                                                                              TableScan [TS_53] (rows=287989836 width=135)
+                                                                            Filter Operator [FIL_1256] (rows=287989836 width=135)
+                                                                              predicate:((cs_item_sk BETWEEN DynamicValue(RS_171_item_i_item_sk_min) AND DynamicValue(RS_171_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_171_item_i_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_187_catalog_returns_cr_item_sk_min) AND DynamicValue(RS_187_catalog_returns_cr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_187_catalog_returns_cr_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_221_store_returns_sr_item_sk_min) AND DynamicValue(RS_221_store_returns_sr_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_221_store_returns_sr_item_sk_bloom_filter))) and (cs_order_number BETWEEN DynamicValue(RS_187_catalog_returns_cr_order_number_min) AND DynamicValue(RS_187_catalog_returns_cr_order_number_max) and in_bloom_filter(cs_order_number, DynamicValue(RS_187_catalog_returns_cr_order_number_bloom_filter))) and cs_item_sk is not null and cs_order_number is not null)
+                                                                              TableScan [TS_180] (rows=287989836 width=135)
                                                                                 default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_item_sk","cs_order_number","cs_ext_list_price"]
-                                                                              <-Reducer 61 [BROADCAST_EDGE] vectorized
-                                                                                BROADCAST [RS_1162]
-                                                                                  Group By Operator [GBY_1160] (rows=1 width=12)
+                                                                              <-Reducer 45 [BROADCAST_EDGE] vectorized
+                                                                                BROADCAST [RS_1242]
+                                                                                  Group By Operator [GBY_1240] (rows=1 width=12)
+                                                                                    Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                  <-Map 42 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                    PARTITION_ONLY_SHUFFLE [RS_1144]
+                                                                                      Group By Operator [GBY_1142] (rows=1 width=12)
+                                                                                        Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                        Select Operator [SEL_1140] (rows=2851 width=1436)
+                                                                                          Output:["_col0"]
+                                                                                          Select Operator [SEL_1136] (rows=2851 width=1436)
+                                                                                            Output:["_col0","_col3"]
+                                                                                            Filter Operator [FIL_1135] (rows=2851 width=1436)
+                                                                                              predicate:((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45 and i_current_price BETWEEN 36 AND 50 and i_item_sk is not null)
+                                                                                              TableScan [TS_34] (rows=462000 width=1436)
+                                                                                                default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_color","i_product_name"]
+                                                                              <-Reducer 67 [BROADCAST_EDGE] vectorized
+                                                                                BROADCAST [RS_1255]
+                                                                                  Group By Operator [GBY_1253] (rows=1 width=12)
                                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
-                                                                                  <-Map 60 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                    PARTITION_ONLY_SHUFFLE [RS_1156]
-                                                                                      Group By Operator [GBY_1152] (rows=1 width=12)
+                                                                                  <-Map 64 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                    PARTITION_ONLY_SHUFFLE [RS_1199]
+                                                                                      Group By Operator [GBY_1195] (rows=1 width=12)
                                                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                                                        Select Operator [SEL_1147] (rows=57591150 width=77)
+                                                                                        Select Operator [SEL_1191] (rows=57591150 width=77)
                                                                                           Output:["_col0"]
-                                                                                           Please refer to the previous Select Operator [SEL_1145]
-                                                                              <-Reducer 55 [BROADCAST_EDGE] vectorized
-                                                                                BROADCAST [RS_1141]
-                                                                                  Group By Operator [GBY_1140] (rows=1 width=12)
+                                                                                           Please refer to the previous Select Operator [SEL_1186]
+                                                                              <-Reducer 59 [BROADCAST_EDGE] vectorized
+                                                                                BROADCAST [RS_1250]
+                                                                                  Group By Operator [GBY_1249] (rows=1 width=12)
                                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=28798880)"]
-                                                                                  <-Map 54 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                    PARTITION_ONLY_SHUFFLE [RS_1138]
-                                                                                      Group By Operator [GBY_1136] (rows=1 width=12)
+                                                                                  <-Map 53 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                    PARTITION_ONLY_SHUFFLE [RS_1179]
+                                                                                      Group By Operator [GBY_1175] (rows=1 width=12)
                                                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=28798880)"]
-                                                                                        Select Operator [SEL_1134] (rows=28798881 width=106)
+                                                                                        Select Operator [SEL_1171] (rows=28798881 width=106)
                                                                                           Output:["_col0"]
-                                                                                           Please refer to the previous Select Operator [SEL_1132]
-                                                                              <-Reducer 56 [BROADCAST_EDGE] vectorized
-                                                                                BROADCAST [RS_1143]
-                                                                                  Group By Operator [GBY_1142] (rows=1 width=12)
+                                                                                           Please refer to the previous Select Operator [SEL_1166]
+                                                                              <-Reducer 60 [BROADCAST_EDGE] vectorized
+                                                                                BROADCAST [RS_1252]
+                                                                                  Group By Operator [GBY_1251] (rows=1 width=12)
                                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=28798880)"]
-                                                                                  <-Map 54 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                    PARTITION_ONLY_SHUFFLE [RS_1139]
-                                                                                      Group By Operator [GBY_1137] (rows=1 width=12)
+                                                                                  <-Map 53 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                    PARTITION_ONLY_SHUFFLE [RS_1180]
+                                                                                      Group By Operator [GBY_1176] (rows=1 width=12)
                                                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=28798880)"]
-                                                                                        Select Operator [SEL_1135] (rows=28798881 width=106)
+                                                                                        Select Operator [SEL_1172] (rows=28798881 width=106)
                                                                                           Output:["_col0"]
-                                                                                           Please refer to the previous Select Operator [SEL_1132]
+                                                                                           Please refer to the previous Select Operator [SEL_1166]
                                                       <-Reducer 26 [SIMPLE_EDGE]
                                                         SHUFFLE [RS_211]
                                                           PartitionCols:_col5
-                                                          Merge Join Operator [MERGEJOIN_1048] (rows=843315281 width=88)
+                                                          Merge Join Operator [MERGEJOIN_1080] (rows=843315281 width=88)
                                                             Conds:RS_208._col0=RS_209._col5(Inner),Output:["_col5","_col6","_col7","_col9","_col10","_col12","_col13","_col14","_col15","_col16","_col19"]
                                                           <-Reducer 33 [SIMPLE_EDGE]
                                                             SHUFFLE [RS_208]
                                                               PartitionCols:_col0
-                                                               Please refer to the previous Merge Join Operator [MERGEJOIN_1021]
+                                                               Please refer to the previous Merge Join Operator [MERGEJOIN_1053]
                                                           <-Reducer 25 [SIMPLE_EDGE]
                                                             SHUFFLE [RS_209]
                                                               PartitionCols:_col5
                                                               Select Operator [SEL_179] (rows=766650239 width=88)
                                                                 Output:["_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col13","_col16"]
-                                                                Merge Join Operator [MERGEJOIN_1046] (rows=766650239 width=88)
-                                                                  Conds:RS_176._col7=RS_1121._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col15"]
+                                                                Merge Join Operator [MERGEJOIN_1078] (rows=766650239 width=88)
+                                                                  Conds:RS_176._col7=RS_1155._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col8","_col9","_col10","_col11","_col12","_col15"]
                                                                 <-Map 46 [SIMPLE_EDGE] vectorized
-                                                                  PARTITION_ONLY_SHUFFLE [RS_1121]
+                                                                  PARTITION_ONLY_SHUFFLE [RS_1155]
                                                                     PartitionCols:_col0
-                                                                    Select Operator [SEL_1118] (rows=2300 width=1179)
+                                                                    Select Operator [SEL_1152] (rows=2300 width=1179)
                                                                       Output:["_col0"]
-                                                                      Filter Operator [FIL_1117] (rows=2300 width=1179)
+                                                                      Filter Operator [FIL_1151] (rows=2300 width=1179)
                                                                         predicate:p_promo_sk is not null
                                                                         TableScan [TS_40] (rows=2300 width=1179)
                                                                           default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk"]
                                                                 <-Reducer 24 [SIMPLE_EDGE]
                                                                   SHUFFLE [RS_176]
                                                                     PartitionCols:_col7
-                                                                    Merge Join Operator [MERGEJOIN_1045] (rows=696954748 width=88)
-                                                                      Conds:RS_173._col0=RS_1069._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col15"]
+                                                                    Merge Join Operator [MERGEJOIN_1077] (rows=696954748 width=88)
+                                                                      Conds:RS_173._col0=RS_1101._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col15"]
                                                                     <-Map 15 [SIMPLE_EDGE] vectorized
-                                                                      PARTITION_ONLY_SHUFFLE [RS_1069]
+                                                                      PARTITION_ONLY_SHUFFLE [RS_1101]
                                                                         PartitionCols:_col0
-                                                                        Select Operator [SEL_1064] (rows=36524 width=1119)
+                                                                        Select Operator [SEL_1096] (rows=36524 width=1119)
                                                                           Output:["_col0"]
-                                                                          Filter Operator [FIL_1061] (rows=36524 width=1119)
+                                                                          Filter Operator [FIL_1093] (rows=36524 width=1119)
                                                                             predicate:((d_year = 2001) and d_date_sk is not null)
                                                                              Please refer to the previous TableScan [TS_3]
                                                                     <-Reducer 44 [SIMPLE_EDGE]
                                                                       SHUFFLE [RS_173]
                                                                         PartitionCols:_col0
-                                                                        Merge Join Operator [MERGEJOIN_1044] (rows=633595212 width=88)
-                                                                          Conds:RS_1225._col1=RS_1107._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col15"]
+                                                                        Merge Join Operator [MERGEJOIN_1076] (rows=633595212 width=88)
+                                                                          Conds:RS_1278._col1=RS_1139._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col15"]
                                                                         <-Map 42 [SIMPLE_EDGE] vectorized
-                                                                          PARTITION_ONLY_SHUFFLE [RS_1107]
+                                                                          PARTITION_ONLY_SHUFFLE [RS_1139]
                                                                             PartitionCols:_col0
-                                                                            Select Operator [SEL_1104] (rows=2851 width=1436)
-                                                                              Output:["_col0","_col3"]
-                                                                              Filter Operator [FIL_1103] (rows=2851 width=1436)
-                                                                                predicate:((i_color) IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate') and i_current_price BETWEEN 35 AND 45 and i_current_price BETWEEN 36 AND 50 and i_item_sk is not null)
-                                                                                TableScan [TS_34] (rows=462000 width=1436)
-                                                                                  default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price","i_color","i_product_name"]
-                                                                        <-Map 68 [SIMPLE_EDGE] vectorized
-                                                                          SHUFFLE [RS_1225]
+                                                                             Please refer to the previous Select Operator [SEL_1136]
+                                                                        <-Map 72 [SIMPLE_EDGE] vectorized
+                                                                          SHUFFLE [RS_1278]
                                                                             PartitionCols:_col1
-                                                                            Select Operator [SEL_1224] (rows=575995635 width=88)
+                                                                            Select Operator [SEL_1277] (rows=575995635 width=88)
                                                                               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"]
-                                                                              Filter Operator [FIL_1223] (rows=575995635 width=88)
-                                                                                predicate:((ss_addr_sk BETWEEN DynamicValue(RS_218_ad1_ca_address_sk_min) AND DynamicValue(RS_218_ad1_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_218_ad1_ca_address_sk_bloom_filter))) and (ss_cdemo_sk BETWEEN DynamicValue(RS_246_cd1_cd_demo_sk_min) AND DynamicValue(RS_246_cd1_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_246_cd1_cd_demo_sk_bloom_filter))) and (ss_hdemo_sk BETWEEN DynamicValue(RS_208_hd1_hd_demo_sk_min) AND DynamicValue(RS_208_hd1_hd_demo_sk_max) and in_bloom_filter(ss_hdemo_sk, DynamicValue(RS_208_hd1_hd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_171_item_i_item_sk_min) AND DynamicValue(RS_171_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_171_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_212_catalog_sales_cs_item_sk_min) AND DynamicValue(RS_212_catalog_sales_cs_item_sk_max) and i
 n_bloom_filter(ss_item_sk, DynamicValue(RS_212_catalog_sales_cs_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_221_store_returns_sr_item_sk_min) AND DynamicValue(RS_221_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_221_store_returns_sr_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_177_promotion_p_promo_sk_min) AND DynamicValue(RS_177_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_177_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_174_d1_d_date_sk_min) AND DynamicValue(RS_174_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_174_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_215_store_s_store_sk_min) AND DynamicValue(RS_215_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_215_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_221_store_returns_sr_ticket_number_mi
 n) AND DynamicValue(RS_221_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_221_store_returns_sr_ticket_number_bloom_filter))) and ss_addr_sk is not null and ss_cdemo_sk is not null and ss_customer_sk is not null and ss_hdemo_sk is not null and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
+                                                                              Filter Operator [FIL_1276] (rows=575995635 width=88)
+                                                                                predicate:((ss_addr_sk BETWEEN DynamicValue(RS_218_ad1_ca_address_sk_min) AND DynamicValue(RS_218_ad1_ca_address_sk_max) and in_bloom_filter(ss_addr_sk, DynamicValue(RS_218_ad1_ca_address_sk_bloom_filter))) and (ss_cdemo_sk BETWEEN DynamicValue(RS_246_cd1_cd_demo_sk_min) AND DynamicValue(RS_246_cd1_cd_demo_sk_max) and in_bloom_filter(ss_cdemo_sk, DynamicValue(RS_246_cd1_cd_demo_sk_bloom_filter))) and (ss_hdemo_sk BETWEEN DynamicValue(RS_208_hd1_hd_demo_sk_min) AND DynamicValue(RS_208_hd1_hd_demo_sk_max) and in_bloom_filter(ss_hdemo_sk, DynamicValue(RS_208_hd1_hd_demo_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_171_item_i_item_sk_min) AND DynamicValue(RS_171_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_171_item_i_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_212_catalog_sales_cs_item_sk_min) AND DynamicValue(RS_212_catalog_sales_cs_item_sk_max) and i
 n_bloom_filter(ss_item_sk, DynamicValue(RS_212_catalog_sales_cs_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_221_store_returns_sr_item_sk_min) AND DynamicValue(RS_221_store_returns_sr_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_221_store_returns_sr_item_sk_bloom_filter))) and (ss_item_sk BETWEEN DynamicValue(RS_44_item_i_item_sk_min) AND DynamicValue(RS_44_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_44_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_177_promotion_p_promo_sk_min) AND DynamicValue(RS_177_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_177_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_174_d1_d_date_sk_min) AND DynamicValue(RS_174_d1_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_174_d1_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_215_store_s_store_sk_min) AND DynamicValue(RS_215_sto
 re_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_215_store_s_store_sk_bloom_filter))) and (ss_ticket_number BETWEEN DynamicValue(RS_221_store_returns_sr_ticket_number_min) AND DynamicValue(RS_221_store_returns_sr_ticket_number_max) and in_bloom_filter(ss_ticket_number, DynamicValue(RS_221_store_returns_sr_ticket_number_bloom_filter))) and ss_addr_sk is not null and ss_cdemo_sk is not null and ss_customer_sk is not null and ss_hdemo_sk is not null and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null and ss_ticket_number is not null)
                                                                                 TableScan [TS_158] (rows=575995635 width=88)
                                                                                   default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_cdemo_sk","ss_hdemo_sk","ss_addr_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_wholesale_cost","ss_list_price","ss_coupon_amt"]
+                                                                                <-Reducer 43 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1148]
+                                                                                    Group By Operator [GBY_1145] (rows=1 width=12)
+                                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                                                                    <-Map 42 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1143]
+                                                                                        Group By Operator [GBY_1141] (rows=1 width=12)
+                                                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                                                          Select Operator [SEL_1138] (rows=2851 width=1436)
+                                                                                            Output:["_col0"]
+                                                                                             Please refer to the previous Select Operator [SEL_1136]
+                                                                                <-Reducer 45 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1241]
+                                                                                     Please refer to the previous Group By Operator [GBY_1240]
+                                                                                <-Reducer 67 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1254]
+                                                                                     Please refer to the previous Group By Operator [GBY_1253]
                                                                                 <-Reducer 31 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1206]
-                                                                                    Group By Operator [GBY_1205] (rows=1 width=12)
+                                                                                  BROADCAST [RS_1244]
+                                                                                    Group By Operator [GBY_1243] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                                     <-Map 15 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1074]
-                                                                                        Group By Operator [GBY_1072] (rows=1 width=12)
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1106]
+                                                                                        Group By Operator [GBY_1104] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                          Select Operator [SEL_1070] (rows=36524 width=1119)
+                                                                                          Select Operator [SEL_1102] (rows=36524 width=1119)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1064]
+                                                                                             Please refer to the previous Select Operator [SEL_1096]
                                                                                 <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1210]
-                                                                                    Group By Operator [GBY_1209] (rows=1 width=12)
+                                                                                  BROADCAST [RS_1248]
+                                                                                    Group By Operator [GBY_1247] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                                     <-Reducer 33 [CUSTOM_SIMPLE_EDGE]
-                                                                                      SHUFFLE [RS_895]
-                                                                                        Group By Operator [GBY_894] (rows=1 width=12)
+                                                                                      SHUFFLE [RS_909]
+                                                                                        Group By Operator [GBY_908] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                          Select Operator [SEL_893] (rows=7920 width=107)
+                                                                                          Select Operator [SEL_907] (rows=7920 width=107)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_1021]
+                                                                                             Please refer to the previous Merge Join Operator [MERGEJOIN_1053]
                                                                                 <-Reducer 39 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1218]
-                                                                                    Group By Operator [GBY_1217] (rows=1 width=12)
+                                                                                  BROADCAST [RS_1271]
+                                                                                    Group By Operator [GBY_1270] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=40000000)"]
                                                                                     <-Map 37 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      SHUFFLE [RS_1091]
-                                                                                        Group By Operator [GBY_1089] (rows=1 width=12)
+                                                                                      SHUFFLE [RS_1123]
+                                                                                        Group By Operator [GBY_1121] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=40000000)"]
-                                                                                          Select Operator [SEL_1087] (rows=40000000 width=1014)
-                                                                                            Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1082]
-                                                                                <-Reducer 45 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1204]
-                                                                                    Group By Operator [GBY_1203] (rows=1 width=12)
-                                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                                    <-Map 42 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1112]
-                                                                                        Group By Operator [GBY_1110] (rows=1 width=12)
-                                                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                          Select Operator [SEL_1108] (rows=2851 width=1436)
+                                                                                          Select Operator [SEL_1119] (rows=40000000 width=1014)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1104]
+                                                                                             Please refer to the previous Select Operator [SEL_1114]
                                                                                 <-Reducer 48 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1208]
-                                                                                    Group By Operator [GBY_1207] (rows=1 width=12)
+                                                                                  BROADCAST [RS_1246]
+                                                                                    Group By Operator [GBY_1245] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                                     <-Map 46 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1126]
-                                                                                        Group By Operator [GBY_1124] (rows=1 width=12)
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1160]
+                                                                                        Group By Operator [GBY_1158] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                          Select Operator [SEL_1122] (rows=2300 width=1179)
+                                                                                          Select Operator [SEL_1156] (rows=2300 width=1179)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1118]
-                                                                                <-Reducer 53 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1212]
-                                                                                    Group By Operator [GBY_1211] (rows=1 width=228)
+                                                                                             Please refer to the previous Select Operator [SEL_1152]
+                                                                                <-Reducer 58 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1267]
+                                                                                    Group By Operator [GBY_1266] (rows=1 width=228)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=52798136)"]
-                                                                                    <-Reducer 51 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1176]
-                                                                                        Group By Operator [GBY_1174] (rows=1 width=228)
+                                                                                    <-Reducer 57 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1265]
+                                                                                        Group By Operator [GBY_1264] (rows=1 width=228)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=52798136)"]
-                                                                                          Select Operator [SEL_1172] (rows=52798137 width=135)
+                                                                                          Select Operator [SEL_1263] (rows=52798137 width=135)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1168]
-                                                                                <-Reducer 59 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1216]
-                                                                                    Group By Operator [GBY_1215] (rows=1 width=12)
+                                                                                             Please refer to the previous Select Operator [SEL_1261]
+                                                                                <-Reducer 63 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1269]
+                                                                                    Group By Operator [GBY_1268] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                                    <-Map 57 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1188]
-                                                                                        Group By Operator [GBY_1186] (rows=1 width=12)
+                                                                                    <-Map 61 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1225]
+                                                                                        Group By Operator [GBY_1223] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                          Select Operator [SEL_1184] (rows=1704 width=1910)
+                                                                                          Select Operator [SEL_1221] (rows=1704 width=1910)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1180]
-                                                                                <-Reducer 63 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1214]
-                                                                                    Group By Operator [GBY_1213] (rows=1 width=12)
+                                                                                             Please refer to the previous Select Operator [SEL_1217]
+                                                                                <-Reducer 68 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1273]
+                                                                                    Group By Operator [GBY_1272] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
-                                                                                    <-Map 60 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1158]
-                                                                                        Group By Operator [GBY_1154] (rows=1 width=12)
+                                                                                    <-Map 64 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                      PARTITION_ONLY_SHUFFLE [RS_1200]
+                                                                                        Group By Operator [GBY_1196] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                                                          Select Operator [SEL_1150] (rows=57591150 width=77)
+                                                                                          Select Operator [SEL_1192] (rows=57591150 width=77)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1145]
-                                                                                <-Reducer 64 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1220]
-                                                                                    Group By Operator [GBY_1219] (rows=1 width=12)
-                                                                                      Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=57591152)"]
-                                                                                    <-Map 60 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      PARTITION_ONLY_SHUFFLE [RS_1159]
-                                                                                        Group By Operator [GBY_1155] (rows=1 width=12)
-                                                                                          Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=57591152)"]
-                                                                                          Select Operator [SEL_1151] (rows=57591150 width=77)
-                                                                                            Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1145]
-                                                                                <-Reducer 67 [BROADCAST_EDGE] vectorized
-                                                                                  BROADCAST [RS_1222]
-                                                                                    Group By Operator [GBY_1221] (rows=1 width=12)
+                                                                                             Please refer to the previous Select Operator [SEL_1186]
+                                                                                <-Reducer 71 [BROADCAST_EDGE] vectorized
+                                                                                  BROADCAST [RS_1275]
+                                                                                    Group By Operator [GBY_1274] (rows=1 width=12)
                                                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1861800)"]
-                                                                                    <-Map 65 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                      SHUFFLE [RS_1102]
-                                                                                        Group By Operator [GBY_1100] (rows=1 width=12)
+                                                                                    <-Map 69 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                                      SHUFFLE [RS_1134]
+                                                                                        Group By Operator [GBY_1132] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1861800)"]
-                                                                                          Select Operator [SEL_1098] (rows=1861800 width=385)
+                                                                                          Select Operator [SEL_1130] (rows=1861800 width=385)
                                                                                             Output:["_col0"]
-                                                                                             Please refer to the previous Select Operator [SEL_1093]
+                                                                                             Please refer to the previous Select Operator [SEL_1125]
                 <-Reducer 9 [SIMPLE_EDGE] vectorized
-                  SHUFFLE [RS_1202]
+                  SHUFFLE [RS_1239]
                     PartitionCols:_col2, _col1, _col3
-                    Select Operator [SEL_1201] (rows=746992327 width=88)
+                    Select Operator [SEL_1238] (rows=746992327 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15"]
-        

<TRUNCATED>

[06/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
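
The updated plans in this diff add runtime semijoin-reduction predicates of the form "col BETWEEN DynamicValue(RS_x_min) AND DynamicValue(RS_x_max) and in_bloom_filter(col, DynamicValue(RS_x_bloom_filter))" onto the large fact-table scans (store_sales, catalog_sales). As a rough illustration of the idea only — this is not Hive's implementation, and every class and method name below is invented for the sketch — the small (dimension) side of a join publishes a min/max range plus a Bloom filter over its join keys, and the fact-side scan consults them to skip rows that cannot possibly join:

import java.util.BitSet;
import java.util.List;

final class SemiJoinReductionSketch {

  /** Toy Bloom filter over long keys; a real one would size itself from expectedEntries. */
  static final class ToyBloomFilter {
    private static final int SIZE_BITS = 1 << 20;
    private final BitSet bits = new BitSet(SIZE_BITS);

    void add(long key) {
      bits.set(hash(key, 0x9E3779B97F4A7C15L));
      bits.set(hash(key, 0xC2B2AE3D27D4EB4FL));
    }

    boolean mightContain(long key) {
      return bits.get(hash(key, 0x9E3779B97F4A7C15L))
          && bits.get(hash(key, 0xC2B2AE3D27D4EB4FL));
    }

    private int hash(long key, long seed) {
      long h = key * seed;
      h ^= h >>> 32;
      return (int) (h & (SIZE_BITS - 1));
    }
  }

  /** Built from the dimension side of the join and broadcast to the fact side. */
  static final class RuntimeFilter {
    long min = Long.MAX_VALUE;
    long max = Long.MIN_VALUE;
    final ToyBloomFilter bloom = new ToyBloomFilter();

    void add(long joinKey) {
      min = Math.min(min, joinKey);
      max = Math.max(max, joinKey);
      bloom.add(joinKey);
    }

    /** Cheap, conservative pre-filter applied on the fact-side scan before the join. */
    boolean mightMatch(long joinKey) {
      return joinKey >= min && joinKey <= max && bloom.mightContain(joinKey);
    }
  }

  static RuntimeFilter buildFromDimensionKeys(List<Long> dimensionJoinKeys) {
    RuntimeFilter filter = new RuntimeFilter();
    for (long key : dimensionJoinKeys) {
      filter.add(key);
    }
    return filter;
  }
}

The pre-filter is conservative by design: a Bloom filter can return false positives, so rows that survive it are still verified by the join itself, but rows it rejects can never match, which is why these extra predicates are safe to push down onto the scan.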
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query80.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query80.q.out b/ql/src/test/results/clientpositive/perf/tez/query80.q.out
index 816b525..ee8cdd8 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query80.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query80.q.out
@@ -232,26 +232,26 @@ Stage-0
     limit:100
     Stage-1
       Reducer 10 vectorized
-      File Output Operator [FS_457]
-        Limit [LIM_456] (rows=100 width=108)
+      File Output Operator [FS_460]
+        Limit [LIM_459] (rows=100 width=108)
           Number of rows:100
-          Select Operator [SEL_455] (rows=1217531358 width=108)
+          Select Operator [SEL_458] (rows=1217531358 width=108)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 9 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_454]
-              Select Operator [SEL_453] (rows=1217531358 width=108)
+            SHUFFLE [RS_457]
+              Select Operator [SEL_456] (rows=1217531358 width=108)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_452] (rows=1217531358 width=108)
+                Group By Operator [GBY_455] (rows=1217531358 width=108)
                   Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Union 8 [SIMPLE_EDGE]
                   <-Reducer 18 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_481]
+                    Reduce Output Operator [RS_484]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_480] (rows=2435062716 width=108)
+                      Group By Operator [GBY_483] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_479] (rows=231905279 width=135)
+                        Select Operator [SEL_482] (rows=231905279 width=135)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_478] (rows=231905279 width=135)
+                          Group By Operator [GBY_481] (rows=231905279 width=135)
                             Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
                           <-Reducer 17 [SIMPLE_EDGE]
                             SHUFFLE [RS_75]
@@ -260,134 +260,134 @@ Stage-0
                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
                                 Select Operator [SEL_72] (rows=463810558 width=135)
                                   Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_365] (rows=463810558 width=135)
-                                    Conds:RS_69._col1=RS_466._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                  Merge Join Operator [MERGEJOIN_368] (rows=463810558 width=135)
+                                    Conds:RS_69._col1=RS_469._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
                                   <-Map 39 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_466]
+                                    PARTITION_ONLY_SHUFFLE [RS_469]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_465] (rows=46000 width=460)
+                                      Select Operator [SEL_468] (rows=46000 width=460)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_464] (rows=46000 width=460)
+                                        Filter Operator [FIL_467] (rows=46000 width=460)
                                           predicate:cp_catalog_page_sk is not null
                                           TableScan [TS_54] (rows=46000 width=460)
                                             default@catalog_page,catalog_page,Tbl:COMPLETE,Col:NONE,Output:["cp_catalog_page_sk","cp_catalog_page_id"]
                                   <-Reducer 16 [SIMPLE_EDGE]
                                     SHUFFLE [RS_69]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_364] (rows=421645953 width=135)
-                                        Conds:RS_66._col3=RS_422._col0(Inner),Output:["_col1","_col5","_col6","_col9","_col10"]
+                                      Merge Join Operator [MERGEJOIN_367] (rows=421645953 width=135)
+                                        Conds:RS_66._col3=RS_425._col0(Inner),Output:["_col1","_col5","_col6","_col9","_col10"]
                                       <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_422]
+                                        SHUFFLE [RS_425]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_419] (rows=1150 width=1179)
+                                          Select Operator [SEL_422] (rows=1150 width=1179)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_418] (rows=1150 width=1179)
+                                            Filter Operator [FIL_421] (rows=1150 width=1179)
                                               predicate:((p_channel_tv = 'N') and p_promo_sk is not null)
                                               TableScan [TS_12] (rows=2300 width=1179)
                                                 default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk","p_channel_tv"]
                                       <-Reducer 15 [SIMPLE_EDGE]
                                         SHUFFLE [RS_66]
                                           PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_363] (rows=383314495 width=135)
-                                            Conds:RS_63._col2=RS_406._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col9","_col10"]
+                                          Merge Join Operator [MERGEJOIN_366] (rows=383314495 width=135)
+                                            Conds:RS_63._col2=RS_409._col0(Inner),Output:["_col1","_col3","_col5","_col6","_col9","_col10"]
                                           <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_406]
+                                            SHUFFLE [RS_409]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_403] (rows=154000 width=1436)
+                                              Select Operator [SEL_406] (rows=154000 width=1436)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_402] (rows=154000 width=1436)
+                                                Filter Operator [FIL_405] (rows=154000 width=1436)
                                                   predicate:((i_current_price > 50) and i_item_sk is not null)
                                                   TableScan [TS_9] (rows=462000 width=1436)
                                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_current_price"]
                                           <-Reducer 14 [SIMPLE_EDGE]
                                             SHUFFLE [RS_63]
                                               PartitionCols:_col2
-                                              Merge Join Operator [MERGEJOIN_362] (rows=348467716 width=135)
-                                                Conds:RS_60._col0=RS_390._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                              Merge Join Operator [MERGEJOIN_365] (rows=348467716 width=135)
+                                                Conds:RS_60._col0=RS_393._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                               <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_390]
+                                                SHUFFLE [RS_393]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_387] (rows=8116 width=1119)
+                                                  Select Operator [SEL_390] (rows=8116 width=1119)
                                                     Output:["_col0"]
-                                                    Filter Operator [FIL_386] (rows=8116 width=1119)
+                                                    Filter Operator [FIL_389] (rows=8116 width=1119)
                                                       predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
                                                       TableScan [TS_6] (rows=73049 width=1119)
                                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                               <-Reducer 37 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_60]
                                                   PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_361] (rows=316788826 width=135)
-                                                    Conds:RS_474._col2, _col4=RS_477._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                  Merge Join Operator [MERGEJOIN_364] (rows=316788826 width=135)
+                                                    Conds:RS_477._col2, _col4=RS_480._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                                   <-Map 36 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_474]
+                                                    SHUFFLE [RS_477]
                                                       PartitionCols:_col2, _col4
-                                                      Select Operator [SEL_473] (rows=287989836 width=135)
+                                                      Select Operator [SEL_476] (rows=287989836 width=135)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_472] (rows=287989836 width=135)
+                                                        Filter Operator [FIL_475] (rows=287989836 width=135)
                                                           predicate:((cs_catalog_page_sk BETWEEN DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_min) AND DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_max) and in_bloom_filter(cs_catalog_page_sk, DynamicValue(RS_70_catalog_page_cp_catalog_page_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_64_item_i_item_sk_min) AND DynamicValue(RS_64_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_64_item_i_item_sk_bloom_filter))) and (cs_promo_sk BETWEEN DynamicValue(RS_67_promotion_p_promo_sk_min) AND DynamicValue(RS_67_promotion_p_promo_sk_max) and in_bloom_filter(cs_promo_sk, DynamicValue(RS_67_promotion_p_promo_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_61_date_dim_d_date_sk_min) AND DynamicValue(RS_61_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_61_date_dim_d_date_sk_bloom_filter))) and cs_catalog_page_sk is not null and cs_item_sk is not null and cs_pr
 omo_sk is not null and cs_sold_date_sk is not null)
                                                           TableScan [TS_39] (rows=287989836 width=135)
                                                             default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_catalog_page_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_ext_sales_price","cs_net_profit"]
                                                           <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_459]
-                                                              Group By Operator [GBY_458] (rows=1 width=12)
+                                                            BROADCAST [RS_462]
+                                                              Group By Operator [GBY_461] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_398]
-                                                                  Group By Operator [GBY_395] (rows=1 width=12)
+                                                                SHUFFLE [RS_401]
+                                                                  Group By Operator [GBY_398] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_391] (rows=8116 width=1119)
+                                                                    Select Operator [SEL_394] (rows=8116 width=1119)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_387]
+                                                                       Please refer to the previous Select Operator [SEL_390]
                                                           <-Reducer 28 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_461]
-                                                              Group By Operator [GBY_460] (rows=1 width=12)
+                                                            BROADCAST [RS_464]
+                                                              Group By Operator [GBY_463] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_414]
-                                                                  Group By Operator [GBY_411] (rows=1 width=12)
+                                                                SHUFFLE [RS_417]
+                                                                  Group By Operator [GBY_414] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_407] (rows=154000 width=1436)
+                                                                    Select Operator [SEL_410] (rows=154000 width=1436)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_403]
+                                                                       Please refer to the previous Select Operator [SEL_406]
                                                           <-Reducer 32 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_463]
-                                                              Group By Operator [GBY_462] (rows=1 width=12)
+                                                            BROADCAST [RS_466]
+                                                              Group By Operator [GBY_465] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_430]
-                                                                  Group By Operator [GBY_427] (rows=1 width=12)
+                                                                SHUFFLE [RS_433]
+                                                                  Group By Operator [GBY_430] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_423] (rows=1150 width=1179)
+                                                                    Select Operator [SEL_426] (rows=1150 width=1179)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_419]
+                                                                       Please refer to the previous Select Operator [SEL_422]
                                                           <-Reducer 40 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_471]
-                                                              Group By Operator [GBY_470] (rows=1 width=12)
+                                                            BROADCAST [RS_474]
+                                                              Group By Operator [GBY_473] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 39 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                PARTITION_ONLY_SHUFFLE [RS_469]
-                                                                  Group By Operator [GBY_468] (rows=1 width=12)
+                                                                PARTITION_ONLY_SHUFFLE [RS_472]
+                                                                  Group By Operator [GBY_471] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_467] (rows=46000 width=460)
+                                                                    Select Operator [SEL_470] (rows=46000 width=460)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_465]
+                                                                       Please refer to the previous Select Operator [SEL_468]
                                                   <-Map 38 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_477]
+                                                    SHUFFLE [RS_480]
                                                       PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_476] (rows=28798881 width=106)
+                                                      Select Operator [SEL_479] (rows=28798881 width=106)
                                                         Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_475] (rows=28798881 width=106)
+                                                        Filter Operator [FIL_478] (rows=28798881 width=106)
                                                           predicate:cr_item_sk is not null
                                                           TableScan [TS_42] (rows=28798881 width=106)
                                                             default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_return_amount","cr_net_loss"]
                   <-Reducer 24 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_505]
+                    Reduce Output Operator [RS_508]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_504] (rows=2435062716 width=108)
+                      Group By Operator [GBY_507] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_503] (rows=115958879 width=135)
+                        Select Operator [SEL_506] (rows=115958879 width=135)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_502] (rows=115958879 width=135)
+                          Group By Operator [GBY_505] (rows=115958879 width=135)
                             Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
                           <-Reducer 23 [SIMPLE_EDGE]
                             SHUFFLE [RS_115]
@@ -396,119 +396,119 @@ Stage-0
                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
                                 Select Operator [SEL_112] (rows=231917759 width=135)
                                   Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_370] (rows=231917759 width=135)
-                                    Conds:RS_109._col2=RS_490._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                  Merge Join Operator [MERGEJOIN_373] (rows=231917759 width=135)
+                                    Conds:RS_109._col2=RS_493._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
                                   <-Map 44 [SIMPLE_EDGE] vectorized
-                                    PARTITION_ONLY_SHUFFLE [RS_490]
+                                    PARTITION_ONLY_SHUFFLE [RS_493]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_489] (rows=84 width=1850)
+                                      Select Operator [SEL_492] (rows=84 width=1850)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_488] (rows=84 width=1850)
+                                        Filter Operator [FIL_491] (rows=84 width=1850)
                                           predicate:web_site_sk is not null
                                           TableScan [TS_94] (rows=84 width=1850)
                                             default@web_site,web_site,Tbl:COMPLETE,Col:NONE,Output:["web_site_sk","web_site_id"]
                                   <-Reducer 22 [SIMPLE_EDGE]
                                     SHUFFLE [RS_109]
                                       PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_369] (rows=210834322 width=135)
-                                        Conds:RS_106._col3=RS_424._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
+                                      Merge Join Operator [MERGEJOIN_372] (rows=210834322 width=135)
+                                        Conds:RS_106._col3=RS_427._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
                                       <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_424]
+                                        SHUFFLE [RS_427]
                                           PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_419]
+                                           Please refer to the previous Select Operator [SEL_422]
                                       <-Reducer 21 [SIMPLE_EDGE]
                                         SHUFFLE [RS_106]
                                           PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_368] (rows=191667562 width=135)
-                                            Conds:RS_103._col1=RS_408._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
+                                          Merge Join Operator [MERGEJOIN_371] (rows=191667562 width=135)
+                                            Conds:RS_103._col1=RS_411._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
                                           <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_408]
+                                            SHUFFLE [RS_411]
                                               PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_403]
+                                               Please refer to the previous Select Operator [SEL_406]
                                           <-Reducer 20 [SIMPLE_EDGE]
                                             SHUFFLE [RS_103]
                                               PartitionCols:_col1
-                                              Merge Join Operator [MERGEJOIN_367] (rows=174243235 width=135)
-                                                Conds:RS_100._col0=RS_392._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                              Merge Join Operator [MERGEJOIN_370] (rows=174243235 width=135)
+                                                Conds:RS_100._col0=RS_395._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                               <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_392]
+                                                SHUFFLE [RS_395]
                                                   PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_387]
+                                                   Please refer to the previous Select Operator [SEL_390]
                                               <-Reducer 42 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_100]
                                                   PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_366] (rows=158402938 width=135)
-                                                    Conds:RS_498._col1, _col4=RS_501._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                  Merge Join Operator [MERGEJOIN_369] (rows=158402938 width=135)
+                                                    Conds:RS_501._col1, _col4=RS_504._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                                   <-Map 41 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_498]
+                                                    SHUFFLE [RS_501]
                                                       PartitionCols:_col1, _col4
-                                                      Select Operator [SEL_497] (rows=144002668 width=135)
+                                                      Select Operator [SEL_500] (rows=144002668 width=135)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_496] (rows=144002668 width=135)
+                                                        Filter Operator [FIL_499] (rows=144002668 width=135)
                                                           predicate:((ws_item_sk BETWEEN DynamicValue(RS_104_item_i_item_sk_min) AND DynamicValue(RS_104_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_104_item_i_item_sk_bloom_filter))) and (ws_promo_sk BETWEEN DynamicValue(RS_107_promotion_p_promo_sk_min) AND DynamicValue(RS_107_promotion_p_promo_sk_max) and in_bloom_filter(ws_promo_sk, DynamicValue(RS_107_promotion_p_promo_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_101_date_dim_d_date_sk_min) AND DynamicValue(RS_101_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_101_date_dim_d_date_sk_bloom_filter))) and (ws_web_site_sk BETWEEN DynamicValue(RS_110_web_site_web_site_sk_min) AND DynamicValue(RS_110_web_site_web_site_sk_max) and in_bloom_filter(ws_web_site_sk, DynamicValue(RS_110_web_site_web_site_sk_bloom_filter))) and ws_item_sk is not null and ws_promo_sk is not null and ws_sold_date_sk is not null and ws_web_site_sk is not null)
                                                           TableScan [TS_79] (rows=144002668 width=135)
                                                             default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_web_site_sk","ws_promo_sk","ws_order_number","ws_ext_sales_price","ws_net_profit"]
                                                           <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_483]
-                                                              Group By Operator [GBY_482] (rows=1 width=12)
+                                                            BROADCAST [RS_486]
+                                                              Group By Operator [GBY_485] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_399]
-                                                                  Group By Operator [GBY_396] (rows=1 width=12)
+                                                                SHUFFLE [RS_402]
+                                                                  Group By Operator [GBY_399] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_393] (rows=8116 width=1119)
+                                                                    Select Operator [SEL_396] (rows=8116 width=1119)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_387]
+                                                                       Please refer to the previous Select Operator [SEL_390]
                                                           <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_485]
-                                                              Group By Operator [GBY_484] (rows=1 width=12)
+                                                            BROADCAST [RS_488]
+                                                              Group By Operator [GBY_487] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_415]
-                                                                  Group By Operator [GBY_412] (rows=1 width=12)
+                                                                SHUFFLE [RS_418]
+                                                                  Group By Operator [GBY_415] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_409] (rows=154000 width=1436)
+                                                                    Select Operator [SEL_412] (rows=154000 width=1436)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_403]
+                                                                       Please refer to the previous Select Operator [SEL_406]
                                                           <-Reducer 33 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_487]
-                                                              Group By Operator [GBY_486] (rows=1 width=12)
+                                                            BROADCAST [RS_490]
+                                                              Group By Operator [GBY_489] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_431]
-                                                                  Group By Operator [GBY_428] (rows=1 width=12)
+                                                                SHUFFLE [RS_434]
+                                                                  Group By Operator [GBY_431] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_425] (rows=1150 width=1179)
+                                                                    Select Operator [SEL_428] (rows=1150 width=1179)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_419]
+                                                                       Please refer to the previous Select Operator [SEL_422]
                                                           <-Reducer 45 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_495]
-                                                              Group By Operator [GBY_494] (rows=1 width=12)
+                                                            BROADCAST [RS_498]
+                                                              Group By Operator [GBY_497] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 44 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                PARTITION_ONLY_SHUFFLE [RS_493]
-                                                                  Group By Operator [GBY_492] (rows=1 width=12)
+                                                                PARTITION_ONLY_SHUFFLE [RS_496]
+                                                                  Group By Operator [GBY_495] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_491] (rows=84 width=1850)
+                                                                    Select Operator [SEL_494] (rows=84 width=1850)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_489]
+                                                                       Please refer to the previous Select Operator [SEL_492]
                                                   <-Map 43 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_501]
+                                                    SHUFFLE [RS_504]
                                                       PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_500] (rows=14398467 width=92)
+                                                      Select Operator [SEL_503] (rows=14398467 width=92)
                                                         Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_499] (rows=14398467 width=92)
+                                                        Filter Operator [FIL_502] (rows=14398467 width=92)
                                                           predicate:wr_item_sk is not null
                                                           TableScan [TS_82] (rows=14398467 width=92)
                                                             default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number","wr_return_amt","wr_net_loss"]
                   <-Reducer 7 [CONTAINS] vectorized
-                    Reduce Output Operator [RS_451]
+                    Reduce Output Operator [RS_454]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_450] (rows=2435062716 width=108)
+                      Group By Operator [GBY_453] (rows=2435062716 width=108)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_449] (rows=463823414 width=88)
+                        Select Operator [SEL_452] (rows=463823414 width=88)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Group By Operator [GBY_448] (rows=463823414 width=88)
+                          Group By Operator [GBY_451] (rows=463823414 width=88)
                             Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0
                           <-Reducer 6 [SIMPLE_EDGE]
                             SHUFFLE [RS_36]
@@ -517,108 +517,108 @@ Stage-0
                                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)"],keys:_col0
                                 Select Operator [SEL_33] (rows=927646829 width=88)
                                   Output:["_col0","_col1","_col2","_col3"]
-                                  Merge Join Operator [MERGEJOIN_360] (rows=927646829 width=88)
-                                    Conds:RS_30._col2=RS_436._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
+                                  Merge Join Operator [MERGEJOIN_363] (rows=927646829 width=88)
+                                    Conds:RS_30._col2=RS_439._col0(Inner),Output:["_col5","_col6","_col9","_col10","_col18"]
                                   <-Map 34 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_436]
+                                    SHUFFLE [RS_439]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_435] (rows=1704 width=1910)
+                                      Select Operator [SEL_438] (rows=1704 width=1910)
                                         Output:["_col0","_col1"]
-                                        Filter Operator [FIL_434] (rows=1704 width=1910)
+                                        Filter Operator [FIL_437] (rows=1704 width=1910)
                                           predicate:s_store_sk is not null
                                           TableScan [TS_15] (rows=1704 width=1910)
                                             default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
                                   <-Reducer 5 [SIMPLE_EDGE]
                                     SHUFFLE [RS_30]
                                       PartitionCols:_col2
-                                      Merge Join Operator [MERGEJOIN_359] (rows=843315281 width=88)
-                                        Conds:RS_27._col3=RS_420._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
+                                      Merge Join Operator [MERGEJOIN_362] (rows=843315281 width=88)
+                                        Conds:RS_27._col3=RS_423._col0(Inner),Output:["_col2","_col5","_col6","_col9","_col10"]
                                       <-Map 30 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_420]
+                                        SHUFFLE [RS_423]
                                           PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_419]
+                                           Please refer to the previous Select Operator [SEL_422]
                                       <-Reducer 4 [SIMPLE_EDGE]
                                         SHUFFLE [RS_27]
                                           PartitionCols:_col3
-                                          Merge Join Operator [MERGEJOIN_358] (rows=766650239 width=88)
-                                            Conds:RS_24._col1=RS_404._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
+                                          Merge Join Operator [MERGEJOIN_361] (rows=766650239 width=88)
+                                            Conds:RS_24._col1=RS_407._col0(Inner),Output:["_col2","_col3","_col5","_col6","_col9","_col10"]
                                           <-Map 26 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_404]
+                                            SHUFFLE [RS_407]
                                               PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_403]
+                                               Please refer to the previous Select Operator [SEL_406]
                                           <-Reducer 3 [SIMPLE_EDGE]
                                             SHUFFLE [RS_24]
                                               PartitionCols:_col1
-                                              Merge Join Operator [MERGEJOIN_357] (rows=696954748 width=88)
-                                                Conds:RS_21._col0=RS_388._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                              Merge Join Operator [MERGEJOIN_360] (rows=696954748 width=88)
+                                                Conds:RS_21._col0=RS_391._col0(Inner),Output:["_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                               <-Map 12 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_388]
+                                                SHUFFLE [RS_391]
                                                   PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_387]
+                                                   Please refer to the previous Select Operator [SEL_390]
                                               <-Reducer 2 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_21]
                                                   PartitionCols:_col0
-                                                  Merge Join Operator [MERGEJOIN_356] (rows=633595212 width=88)
-                                                    Conds:RS_444._col1, _col4=RS_447._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
+                                                  Merge Join Operator [MERGEJOIN_359] (rows=633595212 width=88)
+                                                    Conds:RS_447._col1, _col4=RS_450._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col9","_col10"]
                                                   <-Map 1 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_444]
+                                                    SHUFFLE [RS_447]
                                                       PartitionCols:_col1, _col4
-                                                      Select Operator [SEL_443] (rows=575995635 width=88)
+                                                      Select Operator [SEL_446] (rows=575995635 width=88)
                                                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                        Filter Operator [FIL_442] (rows=575995635 width=88)
+                                                        Filter Operator [FIL_445] (rows=575995635 width=88)
                                                           predicate:((ss_item_sk BETWEEN DynamicValue(RS_25_item_i_item_sk_min) AND DynamicValue(RS_25_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_25_item_i_item_sk_bloom_filter))) and (ss_promo_sk BETWEEN DynamicValue(RS_28_promotion_p_promo_sk_min) AND DynamicValue(RS_28_promotion_p_promo_sk_max) and in_bloom_filter(ss_promo_sk, DynamicValue(RS_28_promotion_p_promo_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_22_date_dim_d_date_sk_min) AND DynamicValue(RS_22_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_22_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_31_store_s_store_sk_min) AND DynamicValue(RS_31_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_31_store_s_store_sk_bloom_filter))) and ss_item_sk is not null and ss_promo_sk is not null and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                           TableScan [TS_0] (rows=575995635 width=88)
                                                             default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_store_sk","ss_promo_sk","ss_ticket_number","ss_ext_sales_price","ss_net_profit"]
                                                           <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_401]
-                                                              Group By Operator [GBY_400] (rows=1 width=12)
+                                                            BROADCAST [RS_404]
+                                                              Group By Operator [GBY_403] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 12 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_397]
-                                                                  Group By Operator [GBY_394] (rows=1 width=12)
+                                                                SHUFFLE [RS_400]
+                                                                  Group By Operator [GBY_397] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_389] (rows=8116 width=1119)
+                                                                    Select Operator [SEL_392] (rows=8116 width=1119)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_387]
+                                                                       Please refer to the previous Select Operator [SEL_390]
                                                           <-Reducer 27 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_417]
-                                                              Group By Operator [GBY_416] (rows=1 width=12)
+                                                            BROADCAST [RS_420]
+                                                              Group By Operator [GBY_419] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_413]
-                                                                  Group By Operator [GBY_410] (rows=1 width=12)
+                                                                SHUFFLE [RS_416]
+                                                                  Group By Operator [GBY_413] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_405] (rows=154000 width=1436)
+                                                                    Select Operator [SEL_408] (rows=154000 width=1436)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_403]
+                                                                       Please refer to the previous Select Operator [SEL_406]
                                                           <-Reducer 31 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_433]
-                                                              Group By Operator [GBY_432] (rows=1 width=12)
+                                                            BROADCAST [RS_436]
+                                                              Group By Operator [GBY_435] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_429]
-                                                                  Group By Operator [GBY_426] (rows=1 width=12)
+                                                                SHUFFLE [RS_432]
+                                                                  Group By Operator [GBY_429] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_421] (rows=1150 width=1179)
+                                                                    Select Operator [SEL_424] (rows=1150 width=1179)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_419]
+                                                                       Please refer to the previous Select Operator [SEL_422]
                                                           <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                            BROADCAST [RS_441]
-                                                              Group By Operator [GBY_440] (rows=1 width=12)
+                                                            BROADCAST [RS_444]
+                                                              Group By Operator [GBY_443] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                               <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_439]
-                                                                  Group By Operator [GBY_438] (rows=1 width=12)
+                                                                SHUFFLE [RS_442]
+                                                                  Group By Operator [GBY_441] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                    Select Operator [SEL_437] (rows=1704 width=1910)
+                                                                    Select Operator [SEL_440] (rows=1704 width=1910)
                                                                       Output:["_col0"]
-                                                                       Please refer to the previous Select Operator [SEL_435]
+                                                                       Please refer to the previous Select Operator [SEL_438]
                                                   <-Map 11 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_447]
+                                                    SHUFFLE [RS_450]
                                                       PartitionCols:_col0, _col1
-                                                      Select Operator [SEL_446] (rows=57591150 width=77)
+                                                      Select Operator [SEL_449] (rows=57591150 width=77)
                                                         Output:["_col0","_col1","_col2","_col3"]
-                                                        Filter Operator [FIL_445] (rows=57591150 width=77)
+                                                        Filter Operator [FIL_448] (rows=57591150 width=77)
                                                           predicate:sr_item_sk is not null
                                                           TableScan [TS_3] (rows=57591150 width=77)
                                                             default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number","sr_return_amt","sr_net_loss"]

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query91.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query91.q.out b/ql/src/test/results/clientpositive/perf/tez/query91.q.out
index 5e0f00a..a53c7d79 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query91.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query91.q.out
@@ -75,109 +75,109 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_168]
-        Select Operator [SEL_167] (rows=58564004 width=860)
+      File Output Operator [FS_170]
+        Select Operator [SEL_169] (rows=58564004 width=860)
           Output:["_col0","_col1","_col2","_col3"]
         <-Reducer 5 [SIMPLE_EDGE] vectorized
-          SHUFFLE [RS_166]
-            Select Operator [SEL_165] (rows=58564004 width=860)
+          SHUFFLE [RS_168]
+            Select Operator [SEL_167] (rows=58564004 width=860)
               Output:["_col0","_col1","_col2","_col4"]
-              Group By Operator [GBY_164] (rows=58564004 width=860)
+              Group By Operator [GBY_166] (rows=58564004 width=860)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
               <-Reducer 4 [SIMPLE_EDGE]
                 SHUFFLE [RS_42]
                   PartitionCols:_col0, _col1, _col2, _col3, _col4
                   Group By Operator [GBY_41] (rows=117128008 width=860)
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col3)"],keys:_col8, _col9, _col10, _col18, _col19
-                    Merge Join Operator [MERGEJOIN_142] (rows=117128008 width=860)
+                    Merge Join Operator [MERGEJOIN_144] (rows=117128008 width=860)
                       Conds:RS_37._col1=RS_38._col2(Inner),Output:["_col3","_col8","_col9","_col10","_col18","_col19"]
                     <-Reducer 12 [SIMPLE_EDGE]
                       SHUFFLE [RS_38]
                         PartitionCols:_col2
                         Select Operator [SEL_30] (rows=106480005 width=860)
                           Output:["_col2","_col7","_col8"]
-                          Merge Join Operator [MERGEJOIN_141] (rows=106480005 width=860)
-                            Conds:RS_27._col2=RS_163._col0(Inner),Output:["_col0","_col5","_col6"]
+                          Merge Join Operator [MERGEJOIN_143] (rows=106480005 width=860)
+                            Conds:RS_27._col2=RS_165._col0(Inner),Output:["_col0","_col5","_col6"]
                           <-Map 15 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_163]
+                            SHUFFLE [RS_165]
                               PartitionCols:_col0
-                              Select Operator [SEL_162] (rows=3600 width=107)
+                              Select Operator [SEL_164] (rows=3600 width=107)
                                 Output:["_col0"]
-                                Filter Operator [FIL_161] (rows=3600 width=107)
+                                Filter Operator [FIL_163] (rows=3600 width=107)
                                   predicate:((hd_buy_potential like '0-500%') and hd_demo_sk is not null)
                                   TableScan [TS_18] (rows=7200 width=107)
                                     default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential"]
                           <-Reducer 11 [SIMPLE_EDGE]
                             SHUFFLE [RS_27]
                               PartitionCols:_col2
-                              Merge Join Operator [MERGEJOIN_140] (rows=96800003 width=860)
-                                Conds:RS_24._col3=RS_160._col0(Inner),Output:["_col0","_col2","_col5","_col6"]
+                              Merge Join Operator [MERGEJOIN_142] (rows=96800003 width=860)
+                                Conds:RS_24._col3=RS_162._col0(Inner),Output:["_col0","_col2","_col5","_col6"]
                               <-Map 14 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_160]
+                                SHUFFLE [RS_162]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_159] (rows=20000000 width=1014)
+                                  Select Operator [SEL_161] (rows=20000000 width=1014)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_158] (rows=20000000 width=1014)
+                                    Filter Operator [FIL_160] (rows=20000000 width=1014)
                                       predicate:((ca_gmt_offset = -7) and ca_address_sk is not null)
                                       TableScan [TS_15] (rows=40000000 width=1014)
                                         default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_gmt_offset"]
                               <-Reducer 10 [SIMPLE_EDGE]
                                 SHUFFLE [RS_24]
                                   PartitionCols:_col3
-                                  Merge Join Operator [MERGEJOIN_139] (rows=88000001 width=860)
-                                    Conds:RS_154._col1=RS_157._col0(Inner),Output:["_col0","_col2","_col3","_col5","_col6"]
+                                  Merge Join Operator [MERGEJOIN_141] (rows=88000001 width=860)
+                                    Conds:RS_156._col1=RS_159._col0(Inner),Output:["_col0","_col2","_col3","_col5","_col6"]
                                   <-Map 13 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_157]
+                                    SHUFFLE [RS_159]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_156] (rows=930900 width=385)
+                                      Select Operator [SEL_158] (rows=930900 width=385)
                                         Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_155] (rows=930900 width=385)
+                                        Filter Operator [FIL_157] (rows=930900 width=385)
                                           predicate:((((cd_marital_status = 'M') and (cd_education_status = 'Unknown')) or ((cd_marital_status = 'W') and (cd_education_status = 'Advanced Degree'))) and ((cd_education_status = 'Unknown') or (cd_education_status = 'Advanced Degree')) and ((cd_marital_status = 'M') or (cd_marital_status = 'W')) and cd_demo_sk is not null)
                                           TableScan [TS_12] (rows=1861800 width=385)
                                             default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status","cd_education_status"]
                                   <-Map 9 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_154]
+                                    SHUFFLE [RS_156]
                                       PartitionCols:_col1
-                                      Select Operator [SEL_153] (rows=80000000 width=860)
+                                      Select Operator [SEL_155] (rows=80000000 width=860)
                                         Output:["_col0","_col1","_col2","_col3"]
-                                        Filter Operator [FIL_152] (rows=80000000 width=860)
+                                        Filter Operator [FIL_154] (rows=80000000 width=860)
                                           predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_current_hdemo_sk is not null and c_customer_sk is not null)
                                           TableScan [TS_9] (rows=80000000 width=860)
                                             default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_hdemo_sk","c_current_addr_sk"]
                     <-Reducer 3 [SIMPLE_EDGE]
                       SHUFFLE [RS_37]
                         PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_138] (rows=34846646 width=106)
-                          Conds:RS_34._col2=RS_151._col0(Inner),Output:["_col1","_col3","_col8","_col9","_col10"]
+                        Merge Join Operator [MERGEJOIN_140] (rows=34846646 width=106)
+                          Conds:RS_34._col2=RS_153._col0(Inner),Output:["_col1","_col3","_col8","_col9","_col10"]
                         <-Map 8 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_151]
+                          SHUFFLE [RS_153]
                             PartitionCols:_col0
-                            Select Operator [SEL_150] (rows=60 width=2045)
+                            Select Operator [SEL_152] (rows=60 width=2045)
                               Output:["_col0","_col1","_col2","_col3"]
-                              Filter Operator [FIL_149] (rows=60 width=2045)
+                              Filter Operator [FIL_151] (rows=60 width=2045)
                                 predicate:cc_call_center_sk is not null
                                 TableScan [TS_6] (rows=60 width=2045)
                                   default@call_center,call_center,Tbl:COMPLETE,Col:NONE,Output:["cc_call_center_sk","cc_call_center_id","cc_name","cc_manager"]
                         <-Reducer 2 [SIMPLE_EDGE]
                           SHUFFLE [RS_34]
                             PartitionCols:_col2
-                            Merge Join Operator [MERGEJOIN_137] (rows=31678769 width=106)
-                              Conds:RS_145._col0=RS_148._col0(Inner),Output:["_col1","_col2","_col3"]
+                            Merge Join Operator [MERGEJOIN_139] (rows=31678769 width=106)
+                              Conds:RS_147._col0=RS_150._col0(Inner),Output:["_col1","_col2","_col3"]
                             <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_145]
+                              SHUFFLE [RS_147]
                                 PartitionCols:_col0
-                                Select Operator [SEL_144] (rows=28798881 width=106)
+                                Select Operator [SEL_146] (rows=28798881 width=106)
                                   Output:["_col0","_col1","_col2","_col3"]
-                                  Filter Operator [FIL_143] (rows=28798881 width=106)
+                                  Filter Operator [FIL_145] (rows=28798881 width=106)
                                     predicate:(cr_call_center_sk is not null and cr_returned_date_sk is not null and cr_returning_customer_sk is not null)
                                     TableScan [TS_0] (rows=28798881 width=106)
                                       default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_returned_date_sk","cr_returning_customer_sk","cr_call_center_sk","cr_net_loss"]
                             <-Map 7 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_148]
+                              SHUFFLE [RS_150]
                                 PartitionCols:_col0
-                                Select Operator [SEL_147] (rows=18262 width=1119)
+                                Select Operator [SEL_149] (rows=18262 width=1119)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_146] (rows=18262 width=1119)
+                                  Filter Operator [FIL_148] (rows=18262 width=1119)
                                     predicate:((d_moy = 11) and (d_year = 1999) and d_date_sk is not null)
                                     TableScan [TS_3] (rows=73049 width=1119)
                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query92.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query92.q.out b/ql/src/test/results/clientpositive/perf/tez/query92.q.out
index 061fcf7..4b4afa9 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query92.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query92.q.out
@@ -59,34 +59,31 @@ POSTHOOK: type: QUERY
 Plan optimized by CBO.
 
 Vertex dependency in root stage
-Map 1 <- Reducer 7 (BROADCAST_EDGE)
-Map 12 <- Reducer 11 (BROADCAST_EDGE), Reducer 14 (BROADCAST_EDGE)
-Reducer 10 <- Map 13 (SIMPLE_EDGE), Reducer 9 (ONE_TO_ONE_EDGE)
-Reducer 11 <- Map 6 (CUSTOM_SIMPLE_EDGE)
-Reducer 14 <- Map 13 (CUSTOM_SIMPLE_EDGE)
-Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
-Reducer 3 <- Reducer 10 (ONE_TO_ONE_EDGE), Reducer 2 (SIMPLE_EDGE)
+Map 1 <- Reducer 11 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
+Reducer 11 <- Map 10 (CUSTOM_SIMPLE_EDGE)
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 8 (SIMPLE_EDGE)
+Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 7 (ONE_TO_ONE_EDGE)
 Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
 Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
-Reducer 7 <- Map 6 (CUSTOM_SIMPLE_EDGE)
-Reducer 8 <- Map 12 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE)
-Reducer 9 <- Reducer 8 (SIMPLE_EDGE)
+Reducer 6 <- Reducer 2 (SIMPLE_EDGE)
+Reducer 7 <- Map 10 (SIMPLE_EDGE), Reducer 6 (ONE_TO_ONE_EDGE)
+Reducer 9 <- Map 8 (CUSTOM_SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
     limit:-1
     Stage-1
       Reducer 5 vectorized
-      File Output Operator [FS_142]
-        Limit [LIM_141] (rows=1 width=112)
+      File Output Operator [FS_135]
+        Limit [LIM_134] (rows=1 width=112)
           Number of rows:100
-          Select Operator [SEL_140] (rows=1 width=112)
+          Select Operator [SEL_133] (rows=1 width=112)
             Output:["_col0"]
           <-Reducer 4 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_139]
-              Select Operator [SEL_138] (rows=1 width=112)
+            SHUFFLE [RS_132]
+              Select Operator [SEL_131] (rows=1 width=112)
                 Output:["_col1"]
-                Group By Operator [GBY_137] (rows=1 width=112)
+                Group By Operator [GBY_130] (rows=1 width=112)
                   Output:["_col0"],aggregations:["sum(VALUE._col0)"]
                 <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
                   PARTITION_ONLY_SHUFFLE [RS_36]
@@ -96,103 +93,78 @@ Stage-0
                         Output:["_col2"]
                         Filter Operator [FIL_33] (rows=58081078 width=135)
                           predicate:(_col2 > CAST( (1.3 * _col6) AS decimal(14,7)))
-                          Merge Join Operator [MERGEJOIN_105] (rows=174243235 width=135)
+                          Merge Join Operator [MERGEJOIN_107] (rows=174243235 width=135)
                             Conds:RS_30._col1=RS_31._col2(Inner),Output:["_col2","_col6"]
-                          <-Reducer 10 [ONE_TO_ONE_EDGE]
-                            FORWARD [RS_31]
-                              PartitionCols:_col2
-                              Merge Join Operator [MERGEJOIN_104] (rows=87121617 width=135)
-                                Conds:RS_136._col0=RS_125._col0(Inner),Output:["_col1","_col2"]
-                              <-Map 13 [SIMPLE_EDGE] vectorized
-                                PARTITION_ONLY_SHUFFLE [RS_125]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_124] (rows=231000 width=1436)
-                                    Output:["_col0"]
-                                    Filter Operator [FIL_123] (rows=231000 width=1436)
-                                      predicate:((i_manufact_id = 269) and i_item_sk is not null)
-                                      TableScan [TS_20] (rows=462000 width=1436)
-                                        default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_manufact_id"]
-                              <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized
-                                FORWARD [RS_136]
-                                  PartitionCols:_col0
-                                  Select Operator [SEL_135] (rows=79201469 width=135)
-                                    Output:["_col0","_col1"]
-                                    Group By Operator [GBY_134] (rows=79201469 width=135)
-                                      Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0
-                                    <-Reducer 8 [SIMPLE_EDGE]
-                                      SHUFFLE [RS_17]
-                                        PartitionCols:_col0
-                                        Group By Operator [GBY_16] (rows=158402938 width=135)
-                                          Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","count(_col2)"],keys:_col1
-                                          Merge Join Operator [MERGEJOIN_103] (rows=158402938 width=135)
-                                            Conds:RS_133._col0=RS_110._col0(Inner),Output:["_col1","_col2"]
-                                          <-Map 6 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_110]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_107] (rows=8116 width=1119)
-                                                Output:["_col0"]
-                                                Filter Operator [FIL_106] (rows=8116 width=1119)
-                                                  predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00' AND TIMESTAMP'1998-06-16 00:00:00' and d_date_sk is not null)
-                                                  TableScan [TS_3] (rows=73049 width=1119)
-                                                    default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
-                                          <-Map 12 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_133]
-                                              PartitionCols:_col0
-                                              Select Operator [SEL_132] (rows=144002668 width=135)
-                                                Output:["_col0","_col1","_col2"]
-                                                Filter Operator [FIL_131] (rows=144002668 width=135)
-                                                  predicate:((ws_item_sk BETWEEN DynamicValue(RS_24_item_i_item_sk_min) AND DynamicValue(RS_24_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_24_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null)
-                                                  TableScan [TS_6] (rows=144002668 width=135)
-                                                    default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_ext_discount_amt"]
-                                                  <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_122]
-                                                      Group By Operator [GBY_121] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                      <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                        SHUFFLE [RS_115]
-                                                          Group By Operator [GBY_113] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                            Select Operator [SEL_111] (rows=8116 width=1119)
-                                                              Output:["_col0"]
-                                                               Please refer to the previous Select Operator [SEL_107]
-                                                  <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_130]
-                                                      Group By Operator [GBY_129] (rows=1 width=12)
-                                                        Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                      <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_128]
-                                                          Group By Operator [GBY_127] (rows=1 width=12)
-                                                            Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                            Select Operator [SEL_126] (rows=231000 width=1436)
-                                                              Output:["_col0"]
-                                                               Please refer to the previous Select Operator [SEL_124]
                           <-Reducer 2 [SIMPLE_EDGE]
                             SHUFFLE [RS_30]
                               PartitionCols:_col1
-                              Merge Join Operator [MERGEJOIN_102] (rows=158402938 width=135)
-                                Conds:RS_120._col0=RS_108._col0(Inner),Output:["_col1","_col2"]
-                              <-Map 6 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_108]
+                              Merge Join Operator [MERGEJOIN_104] (rows=158402938 width=135)
+                                Conds:RS_126._col0=RS_110._col0(Inner),Output:["_col1","_col2"]
+                              <-Map 8 [SIMPLE_EDGE] vectorized
+                                PARTITION_ONLY_SHUFFLE [RS_110]
                                   PartitionCols:_col0
-                                   Please refer to the previous Select Operator [SEL_107]
+                                  Select Operator [SEL_109] (rows=8116 width=1119)
+                                    Output:["_col0"]
+                                    Filter Operator [FIL_108] (rows=8116 width=1119)
+                                      predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-18 00:00:00' AND TIMESTAMP'1998-06-16 00:00:00' and d_date_sk is not null)
+                                      TableScan [TS_3] (rows=73049 width=1119)
+                                        default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                               <-Map 1 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_120]
+                                SHUFFLE [RS_126]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_119] (rows=144002668 width=135)
+                                  Select Operator [SEL_125] (rows=144002668 width=135)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_118] (rows=144002668 width=135)
-                                      predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null)
+                                    Filter Operator [FIL_124] (rows=144002668 width=135)
+                                      predicate:((ws_item_sk BETWEEN DynamicValue(RS_24_item_i_item_sk_min) AND DynamicValue(RS_24_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_24_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_28_date_dim_d_date_sk_min) AND DynamicValue(RS_28_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_28_date_dim_d_date_sk_bloom_filter))) and ws_item_sk is not null and ws_sold_date_sk is not null)
                                       TableScan [TS_0] (rows=144002668 width=135)
                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_ext_discount_amt"]
-                                      <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                        BROADCAST [RS_117]
-                                          Group By Operator [GBY_116] (rows=1 width=12)
+                                      <-Reducer 11 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_123]
+                                          Group By Operator [GBY_122] (rows=1 width=12)
                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                          <-Map 6 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_114]
+                                          <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            SHUFFLE [RS_121]
+                                              Group By Operator [GBY_120] (rows=1 width=12)
+                                                Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                                Select Operator [SEL_119] (rows=231000 width=1436)
+                                                  Output:["_col0"]
+                                                  Select Operator [SEL_117] (rows=231000 width=1436)
+                                                    Output:["_col0"]
+                                                    Filter Operator [FIL_116] (rows=231000 width=1436)
+                                                      predicate:((i_manufact_id = 269) and i_item_sk is not null)
+                                                      TableScan [TS_20] (rows=462000 width=1436)
+                                                        default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_manufact_id"]
+                                      <-Reducer 9 [BROADCAST_EDGE] vectorized
+                                        BROADCAST [RS_115]
+                                          Group By Operator [GBY_114] (rows=1 width=12)
+                                            Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                          <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
+                                            PARTITION_ONLY_SHUFFLE [RS_113]
                                               Group By Operator [GBY_112] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                Select Operator [SEL_109] (rows=8116 width=1119)
+                                                Select Operator [SEL_111] (rows=8116 width=1119)
                                                   Output:["_col0"]
-                                                   Please refer to the previous Select Operator [SEL_107]
+                                                   Please refer to the previous Select Operator [SEL_109]
+                          <-Reducer 7 [ONE_TO_ONE_EDGE]
+                            FORWARD [RS_31]
+                              PartitionCols:_col2
+                              Merge Join Operator [MERGEJOIN_106] (rows=87121617 width=135)
+                                Conds:RS_129._col0=RS_118._col0(Inner),Output:["_col1","_col2"]
+                              <-Map 10 [SIMPLE_EDGE] vectorized
+                                SHUFFLE [RS_118]
+                                  PartitionCols:_col0
+                                   Please refer to the previous Select Operator [SEL_117]
+                              <-Reducer 6 [ONE_TO_ONE_EDGE] vectorized
+                                FORWARD [RS_129]
+                                  PartitionCols:_col0
+                                  Select Operator [SEL_128] (rows=79201469 width=135)
+                                    Output:["_col0","_col1"]
+                                    Group By Operator [GBY_127] (rows=79201469 width=135)
+                                      Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)"],keys:KEY._col0
+                                    <-Reducer 2 [SIMPLE_EDGE]
+                                      SHUFFLE [RS_17]
+                                        PartitionCols:_col0
+                                        Group By Operator [GBY_16] (rows=158402938 width=135)
+                                          Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","count(_col2)"],keys:_col1
+                                           Please refer to the previous Merge Join Operator [MERGEJOIN_104]
 


[16/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ab9e954d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ab9e954d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ab9e954d

Branch: refs/heads/master-txnstats
Commit: ab9e954d478ca0e117b04843ab645f2861e5c925
Parents: bf54424
Author: Jesus Camacho Rodriguez <jc...@apache.org>
Authored: Wed Jul 4 14:05:00 2018 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Jul 13 22:15:23 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   4 +
 .../test/resources/testconfiguration.properties |   1 +
 .../hadoop/hive/ql/parse/TezCompiler.java       | 444 +++++++----
 .../hive/ql/ppd/SyntheticJoinPredicate.java     | 174 ++++-
 .../dynamic_semijoin_reduction_sw2.q            |  59 ++
 .../llap/dynamic_semijoin_reduction_sw2.q.out   | 450 +++++++++++
 .../clientpositive/llap/explainuser_1.q.out     |  12 +-
 .../llap/tez_fixed_bucket_pruning.q.out         |   8 +-
 .../clientpositive/perf/tez/query1.q.out        |  76 +-
 .../clientpositive/perf/tez/query16.q.out       | 118 +--
 .../clientpositive/perf/tez/query17.q.out       | 197 ++---
 .../clientpositive/perf/tez/query18.q.out       | 124 +--
 .../clientpositive/perf/tez/query2.q.out        | 116 +--
 .../clientpositive/perf/tez/query23.q.out       | 444 +++++------
 .../clientpositive/perf/tez/query24.q.out       | 252 +++---
 .../clientpositive/perf/tez/query25.q.out       | 188 ++---
 .../clientpositive/perf/tez/query29.q.out       | 148 ++--
 .../clientpositive/perf/tez/query31.q.out       | 322 ++++----
 .../clientpositive/perf/tez/query32.q.out       | 140 ++--
 .../clientpositive/perf/tez/query39.q.out       |  94 +--
 .../clientpositive/perf/tez/query40.q.out       |  92 +--
 .../clientpositive/perf/tez/query54.q.out       | 246 +++---
 .../clientpositive/perf/tez/query59.q.out       | 134 ++--
 .../clientpositive/perf/tez/query64.q.out       | 760 ++++++++++---------
 .../clientpositive/perf/tez/query69.q.out       | 144 ++--
 .../clientpositive/perf/tez/query72.q.out       | 178 ++---
 .../clientpositive/perf/tez/query77.q.out       | 248 +++---
 .../clientpositive/perf/tez/query78.q.out       | 136 ++--
 .../clientpositive/perf/tez/query80.q.out       | 336 ++++----
 .../clientpositive/perf/tez/query91.q.out       |  74 +-
 .../clientpositive/perf/tez/query92.q.out       | 174 ++---
 .../clientpositive/perf/tez/query94.q.out       | 118 +--
 .../clientpositive/perf/tez/query95.q.out       | 241 +++---
 .../spark_dynamic_partition_pruning_3.q.out     |   3 +-
 34 files changed, 3548 insertions(+), 2707 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 6ea68c3..41fae36 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -3752,6 +3752,10 @@ public class HiveConf extends Configuration {
         "When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" +
         "events from the processing vertices to the Tez application master. These events will be\n" +
         "used to prune unnecessary partitions."),
+    TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED("hive.tez.dynamic.partition.pruning.extended", true,
+        "Whether we should try to create additional opportunities for dynamic pruning, e.g., considering\n" +
+        "siblings that may not be created by normal dynamic pruning logic.\n" +
+        "Only works when dynamic pruning is enabled."),
     TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L,
         "Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place."),
 

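For orientation, the property introduced above is an ordinary session-level setting. The lines below are a minimal sketch (not part of the patch) showing how it would be enabled next to the existing dynamic-pruning and semijoin-reduction flags; the property names themselves come from this commit and from the q-file added later in the diff.

-- Minimal sketch, not part of the patch: enable the new extended flag together
-- with the settings it depends on (it only takes effect when dynamic partition
-- pruning is enabled).
set hive.tez.dynamic.partition.pruning=true;
set hive.tez.dynamic.semijoin.reduction=true;
set hive.tez.dynamic.partition.pruning.extended=true;
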
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 4001b9f..d08528f 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -894,6 +894,7 @@ minillaplocal.query.files=\
   unionDistinct_3.q,\
   vectorized_join46.q,\
   vectorized_multi_output_select.q,\
+  dynamic_semijoin_reduction_sw2.q,\
   partialdhj.q,\
   stats_date.q,\
   dst.q

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index 119aa92..1b433c7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -169,44 +169,7 @@ public class TezCompiler extends TaskCompiler {
     }
     perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run reduce sink after join algorithm selection");
 
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    runRemoveDynamicPruningOptimization(procCtx, inputs, outputs);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run remove dynamic pruning by size");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    markSemiJoinForDPP(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Mark certain semijoin edges important based ");
-
-    // Removing semijoin optimization when it may not be beneficial
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    removeSemijoinOptimizationByBenefit(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove Semijoins based on cost benefits");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    // Remove any parallel edge between semijoin and mapjoin.
-    removeSemijoinsParallelToMapJoin(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run the optimizations that use stats for optimization");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    // Remove semijoin optimization if it creates a cycle with mapside joins
-    removeSemiJoinCyclesDueToMapsideJoins(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if it creates a cycle with mapside join");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    // Remove semijoin optimization if SMB join is created.
-    removeSemijoinOptimizationFromSMBJoins(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if needed");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    // Remove bloomfilter if no stats generated
-    removeSemiJoinIfNoStats(procCtx);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove bloom filter optimizations if needed");
-
-    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
-    // after the stats phase we might have some cyclic dependencies that we need
-    // to take care of.
-    runCycleAnalysisForPartitionPruning(procCtx, inputs, outputs);
-    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run cycle analysis for partition pruning");
+    semijoinRemovalBasedTransformations(procCtx, inputs, outputs);
 
     perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
     if(procCtx.conf.getBoolVar(ConfVars.HIVE_SHARED_WORK_OPTIMIZATION)) {
@@ -230,11 +193,6 @@ public class TezCompiler extends TaskCompiler {
 
   private void runCycleAnalysisForPartitionPruning(OptimizeTezProcContext procCtx,
       Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException {
-
-    if (!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING)) {
-      return;
-    }
-
     boolean cycleFree = false;
     while (!cycleFree) {
       cycleFree = true;
@@ -454,6 +412,80 @@ public class TezCompiler extends TaskCompiler {
     ogw.startWalking(topNodes, null);
   }
 
+  private void semijoinRemovalBasedTransformations(OptimizeTezProcContext procCtx,
+                                                   Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException {
+    PerfLogger perfLogger = SessionState.getPerfLogger();
+
+    final boolean dynamicPartitionPruningEnabled =
+        procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING);
+    final boolean semiJoinReductionEnabled = dynamicPartitionPruningEnabled &&
+        procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION);
+    final boolean extendedReductionEnabled = dynamicPartitionPruningEnabled &&
+        procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED);
+
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (dynamicPartitionPruningEnabled) {
+      runRemoveDynamicPruningOptimization(procCtx, inputs, outputs);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run remove dynamic pruning by size");
+
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled) {
+      markSemiJoinForDPP(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Mark certain semijoin edges important based ");
+
+    // Removing semijoin optimization when it may not be beneficial
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled) {
+      removeSemijoinOptimizationByBenefit(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove Semijoins based on cost benefits");
+
+    // Remove any parallel edge between semijoin and mapjoin.
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled) {
+      removeSemijoinsParallelToMapJoin(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove any parallel edge between semijoin and mapjoin");
+
+    // Remove semijoin optimization if it creates a cycle with mapside joins
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled && procCtx.parseContext.getRsToSemiJoinBranchInfo().size() != 0) {
+      removeSemiJoinCyclesDueToMapsideJoins(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if it creates a cycle with mapside join");
+
+    // Remove semijoin optimization if SMB join is created.
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled && procCtx.parseContext.getRsToSemiJoinBranchInfo().size() != 0) {
+      removeSemijoinOptimizationFromSMBJoins(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove semijoin optimizations if needed");
+
+    // Remove bloomfilter if no stats generated
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (semiJoinReductionEnabled && procCtx.parseContext.getRsToSemiJoinBranchInfo().size() != 0) {
+      removeSemiJoinIfNoStats(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove bloom filter optimizations if needed");
+
+    // after the stats phase we might have some cyclic dependencies that we need
+    // to take care of.
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (dynamicPartitionPruningEnabled) {
+      runCycleAnalysisForPartitionPruning(procCtx, inputs, outputs);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run cycle analysis for partition pruning");
+
+    // remove redundant dpp and semijoins
+    perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER);
+    if (extendedReductionEnabled) {
+      removeRedundantSemijoinAndDpp(procCtx);
+    }
+    perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Remove redundant semijoin reduction");
+  }
+
   private void runRemoveDynamicPruningOptimization(OptimizeTezProcContext procCtx,
       Set<ReadEntity> inputs, Set<WriteEntity> outputs) throws SemanticException {
 
@@ -739,11 +771,6 @@ public class TezCompiler extends TaskCompiler {
 
   private static void removeSemijoinOptimizationFromSMBJoins(
           OptimizeTezProcContext procCtx) throws SemanticException {
-    if (!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION) ||
-            procCtx.parseContext.getRsToSemiJoinBranchInfo().size() == 0) {
-      return;
-    }
-
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(
             new RuleRegExp("R1", TableScanOperator.getOperatorName() + "%" +
@@ -825,11 +852,6 @@ public class TezCompiler extends TaskCompiler {
 
   private static void removeSemiJoinCyclesDueToMapsideJoins(
           OptimizeTezProcContext procCtx) throws SemanticException {
-    if (!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION) ||
-            procCtx.parseContext.getRsToSemiJoinBranchInfo().size() == 0) {
-      return;
-    }
-
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(
             new RuleRegExp("R1", MapJoinOperator.getOperatorName() + "%" +
@@ -914,99 +936,18 @@ public class TezCompiler extends TaskCompiler {
     }
   }
 
-  private static class SemiJoinRemovalIfNoStatsProc implements NodeProcessor {
-
-    @Override
-    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
-                          Object... nodeOutputs) throws SemanticException {
-      assert nd instanceof ReduceSinkOperator;
-      ReduceSinkOperator rs = (ReduceSinkOperator) nd;
-      ParseContext pCtx = ((OptimizeTezProcContext) procCtx).parseContext;
-      SemiJoinBranchInfo sjInfo = pCtx.getRsToSemiJoinBranchInfo().get(rs);
-      if (sjInfo == null) {
-        // nothing to do here.
-        return null;
-      }
-
-      // This is a semijoin branch. The stack should look like,
-      // <Parent Ops>-SEL-GB1-RS1-GB2-RS2
-      GroupByOperator gbOp = (GroupByOperator) (stack.get(stack.size() - 2));
-      GroupByDesc gbDesc = gbOp.getConf();
-      ArrayList<AggregationDesc> aggregationDescs = gbDesc.getAggregators();
-      for (AggregationDesc agg : aggregationDescs) {
-        if (!"bloom_filter".equals(agg.getGenericUDAFName())) {
-          continue;
-        }
-
-        GenericUDAFBloomFilterEvaluator udafBloomFilterEvaluator =
-                (GenericUDAFBloomFilterEvaluator) agg.getGenericUDAFEvaluator();
-        if (udafBloomFilterEvaluator.hasHintEntries())
-         {
-          return null; // Created using hint, skip it
-        }
-
-        long expectedEntries = udafBloomFilterEvaluator.getExpectedEntries();
-        if (expectedEntries == -1 || expectedEntries >
-                pCtx.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES)) {
-          if (sjInfo.getIsHint()) {
-            throw new SemanticException("Removing hinted semijoin due to lack to stats" +
-            " or exceeding max bloom filter entries");
-          }
-          // Remove the semijoin optimization branch along with ALL the mappings
-          // The parent GB2 has all the branches. Collect them and remove them.
-          for (Node node : gbOp.getChildren()) {
-            ReduceSinkOperator rsFinal = (ReduceSinkOperator) node;
-            TableScanOperator ts = pCtx.getRsToSemiJoinBranchInfo().
-                    get(rsFinal).getTsOp();
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("expectedEntries=" + expectedEntries + ". "
-                      + "Either stats unavailable or expectedEntries exceeded max allowable bloomfilter size. "
-                      + "Removing semijoin "
-                      + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(ts));
-            }
-            GenTezUtils.removeBranch(rsFinal);
-            GenTezUtils.removeSemiJoinOperator(pCtx, rsFinal, ts);
-          }
-          return null;
-        }
-      }
-
-      // At this point, hinted semijoin case has been handled already
-      // Check if big table is big enough that runtime filtering is
-      // worth it.
-      TableScanOperator ts = sjInfo.getTsOp();
-      if (ts.getStatistics() != null) {
-        long numRows = ts.getStatistics().getNumRows();
-        if (numRows < pCtx.getConf().getLongVar(ConfVars.TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION)) {
-          if (sjInfo.getShouldRemove()) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Insufficient rows (" + numRows + ") to justify semijoin optimization. Removing semijoin "
-                      + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(ts));
-            }
-            GenTezUtils.removeBranch(rs);
-            GenTezUtils.removeSemiJoinOperator(pCtx, rs, ts);
-          }
-        }
-      }
-      return null;
-    }
-  }
-
   private void removeSemiJoinIfNoStats(OptimizeTezProcContext procCtx)
           throws SemanticException {
-    if(!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
-      // Not needed without semi-join reduction
-      return;
-    }
-
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
     opRules.put(
             new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%" +
                     ReduceSinkOperator.getOperatorName() + "%" +
                     GroupByOperator.getOperatorName() + "%" +
                     ReduceSinkOperator.getOperatorName() + "%"),
-            new SemiJoinRemovalIfNoStatsProc());
-    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
+            new SemiJoinRemovalProc(true, false));
+    SemiJoinRemovalContext ctx =
+        new SemiJoinRemovalContext(procCtx.parseContext);
+    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, ctx);
     List<Node> topNodes = new ArrayList<Node>();
     topNodes.addAll(procCtx.parseContext.getTopOps().values());
     GraphWalker ogw = new PreOrderOnceWalker(disp);
@@ -1077,6 +1018,218 @@ public class TezCompiler extends TaskCompiler {
     GraphWalker ogw = new PreOrderOnceWalker(disp);
     ogw.startWalking(topNodes, null);
   }
+    
+  private class SemiJoinRemovalProc implements NodeProcessor {
+
+    private final boolean removeBasedOnStats;
+    private final boolean removeRedundant;
+
+    private SemiJoinRemovalProc (boolean removeBasedOnStats, boolean removeRedundant) {
+      this.removeBasedOnStats = removeBasedOnStats;
+      this.removeRedundant = removeRedundant;
+    }
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+                          Object... nodeOutputs) throws SemanticException {
+      ReduceSinkOperator rs = (ReduceSinkOperator) nd;
+      SemiJoinRemovalContext rCtx = (SemiJoinRemovalContext) procCtx;
+      ParseContext pCtx = rCtx.parseContext;
+      SemiJoinBranchInfo sjInfo = pCtx.getRsToSemiJoinBranchInfo().get(rs);
+      if (sjInfo == null) {
+        // nothing to do here.
+        return null;
+      }
+      TableScanOperator targetTSOp = sjInfo.getTsOp();
+      ExprNodeDesc targetColExpr = pCtx.getRsToRuntimeValuesInfoMap().get(rs).getTsColExpr();
+
+      // This is a semijoin branch. The stack should look like,
+      // <Parent Ops>-SEL-GB1-RS1-GB2-RS2
+      GroupByOperator gbOp = (GroupByOperator) stack.get(stack.size() - 2);
+      GroupByDesc gbDesc = gbOp.getConf();
+      ArrayList<AggregationDesc> aggregationDescs = gbDesc.getAggregators();
+      for (AggregationDesc agg : aggregationDescs) {
+        if (!isBloomFilterAgg(agg)) {
+          continue;
+        }
+
+        GenericUDAFBloomFilterEvaluator udafBloomFilterEvaluator =
+            (GenericUDAFBloomFilterEvaluator) agg.getGenericUDAFEvaluator();
+        if (udafBloomFilterEvaluator.hasHintEntries()) {
+          return null; // Created using hint, skip it
+        }
+
+        if (removeBasedOnStats) {
+          long expectedEntries = udafBloomFilterEvaluator.getExpectedEntries();
+          if (expectedEntries == -1 || expectedEntries >
+              pCtx.getConf().getLongVar(ConfVars.TEZ_MAX_BLOOM_FILTER_ENTRIES)) {
+            if (sjInfo.getIsHint()) {
+              throw new SemanticException("Removing hinted semijoin due to lack of stats" +
+                  " or exceeding max bloom filter entries");
+            }
+            // Remove the semijoin optimization branch along with ALL the mappings
+            // The parent GB2 has all the branches. Collect them and remove them.
+            for (Node node : gbOp.getChildren()) {
+              ReduceSinkOperator rsFinal = (ReduceSinkOperator) node;
+              TableScanOperator ts = pCtx.getRsToSemiJoinBranchInfo().
+                  get(rsFinal).getTsOp();
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("expectedEntries=" + expectedEntries + ". "
+                    + "Either stats unavailable or expectedEntries exceeded max allowable bloomfilter size. "
+                    + "Removing semijoin "
+                    + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(ts));
+              }
+              GenTezUtils.removeBranch(rsFinal);
+              GenTezUtils.removeSemiJoinOperator(pCtx, rsFinal, ts);
+            }
+            return null;
+          }
+        }
+      }
+
+      if (removeBasedOnStats) {
+        // At this point, hinted semijoin case has been handled already
+        // Check if big table is big enough that runtime filtering is
+        // worth it.
+        TableScanOperator ts = sjInfo.getTsOp();
+        if (ts.getStatistics() != null) {
+          long numRows = ts.getStatistics().getNumRows();
+          if (numRows < pCtx.getConf().getLongVar(ConfVars.TEZ_BIGTABLE_MIN_SIZE_SEMIJOIN_REDUCTION)) {
+            if (sjInfo.getShouldRemove()) {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Insufficient rows (" + numRows + ") to justify semijoin optimization. Removing semijoin "
+                    + OperatorUtils.getOpNamePretty(rs) + " - " + OperatorUtils.getOpNamePretty(ts));
+              }
+              GenTezUtils.removeBranch(rs);
+              GenTezUtils.removeSemiJoinOperator(pCtx, rs, ts);
+            }
+          }
+        }
+      }
+
+      if (removeRedundant) {
+        // Look for RS ops above the current semijoin branch
+        Set<ReduceSinkOperator> rsOps = OperatorUtils.findOperators(
+            ((Operator<?>) stack.get(stack.size() - 5)).getParentOperators().get(0),
+            ReduceSinkOperator.class);
+        for (Operator<?> otherRSOp : rsOps) {
+          SemiJoinBranchInfo otherSjInfo = pCtx.getRsToSemiJoinBranchInfo().get(otherRSOp);
+          // First conjunct prevents SJ RS from removing itself
+          if (otherRSOp != rs && otherSjInfo != null && otherSjInfo.getTsOp() == targetTSOp) {
+            if (rCtx.opsToRemove.containsKey(otherRSOp)) {
+              // We found siblings, since we are removing the other operator, no need to remove this one
+              continue;
+            }
+            ExprNodeDesc otherColExpr = pCtx.getRsToRuntimeValuesInfoMap().get(otherRSOp).getTsColExpr();
+            if (!otherColExpr.isSame(targetColExpr)) {
+              // Filter should be on the same column, otherwise we do not proceed
+              continue;
+            }
+            rCtx.opsToRemove.put(rs, targetTSOp);
+            break;
+          }
+        }
+      }
+
+      return null;
+    }
+  }
+
+  private static boolean isBloomFilterAgg(AggregationDesc agg) {
+    return "bloom_filter".equals(agg.getGenericUDAFName());
+  }
+
+  private static class DynamicPruningRemovalRedundantProc implements NodeProcessor {
+
+    @Override
+    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
+                          Object... nodeOutputs) throws SemanticException {
+      AppMasterEventOperator event = (AppMasterEventOperator) nd;
+      if (!(event.getConf() instanceof DynamicPruningEventDesc)) {
+        return null;
+      }
+
+      SemiJoinRemovalContext rCtx = (SemiJoinRemovalContext) procCtx;
+
+      DynamicPruningEventDesc desc = (DynamicPruningEventDesc) event.getConf();
+      TableScanOperator targetTSOp = desc.getTableScan();
+      String targetColumnName = desc.getTargetColumnName();
+
+      // Look for event ops above the current event op branch
+      Operator<?> op = event.getParentOperators().get(0);
+      while (op.getChildOperators().size() < 2) {
+        op = op.getParentOperators().get(0);
+      }
+      Set<AppMasterEventOperator> eventOps = OperatorUtils.findOperators(
+          op, AppMasterEventOperator.class);
+      for (AppMasterEventOperator otherEvent : eventOps) {
+        if (!(otherEvent.getConf() instanceof DynamicPruningEventDesc)) {
+          continue;
+        }
+        DynamicPruningEventDesc otherDesc = (DynamicPruningEventDesc) otherEvent.getConf();
+        if (otherEvent != event && otherDesc.getTableScan() == targetTSOp &&
+            otherDesc.getTargetColumnName().equals(targetColumnName)) {
+          if (rCtx.opsToRemove.containsKey(otherEvent)) {
+            // We found siblings, since we are removing the other operator, no need to remove this one
+            continue;
+          }
+          rCtx.opsToRemove.put(event, targetTSOp);
+          break;
+        }
+      }
+
+      return null;
+    }
+  }
+
+  private void removeRedundantSemijoinAndDpp(OptimizeTezProcContext procCtx)
+      throws SemanticException {
+    Map<Rule, NodeProcessor> opRules = new LinkedHashMap<>();
+    opRules.put(
+        new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%" +
+            ReduceSinkOperator.getOperatorName() + "%" +
+            GroupByOperator.getOperatorName() + "%" +
+            ReduceSinkOperator.getOperatorName() + "%"),
+        new SemiJoinRemovalProc(false, true));
+    opRules.put(
+        new RuleRegExp("R2",
+            AppMasterEventOperator.getOperatorName() + "%"),
+        new DynamicPruningRemovalRedundantProc());
+
+    // Gather
+    SemiJoinRemovalContext ctx =
+        new SemiJoinRemovalContext(procCtx.parseContext);
+    Dispatcher disp = new DefaultRuleDispatcher(null, opRules, ctx);
+    List<Node> topNodes = new ArrayList<Node>();
+    topNodes.addAll(procCtx.parseContext.getTopOps().values());
+    GraphWalker ogw = new PreOrderOnceWalker(disp);
+    ogw.startWalking(topNodes, null);
+
+    // Remove
+    for (Map.Entry<Operator<?>, TableScanOperator> p : ctx.opsToRemove.entrySet()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Removing redundant " + OperatorUtils.getOpNamePretty(p.getKey()) + " - " + OperatorUtils.getOpNamePretty(p.getValue()));
+      }
+      GenTezUtils.removeBranch(p.getKey());
+      if (p.getKey() instanceof AppMasterEventOperator) {
+        GenTezUtils.removeSemiJoinOperator(procCtx.parseContext, (AppMasterEventOperator) p.getKey(), p.getValue());
+      } else if (p.getKey() instanceof ReduceSinkOperator) {
+        GenTezUtils.removeSemiJoinOperator(procCtx.parseContext, (ReduceSinkOperator) p.getKey(), p.getValue());
+      } else {
+        throw new SemanticException("Unexpected error - type for branch could not be recognized");
+      }
+    }
+  }
+
+  private class SemiJoinRemovalContext implements NodeProcessorCtx {
+    private final ParseContext parseContext;
+    private final Map<Operator<?>, TableScanOperator> opsToRemove;
+
+    private SemiJoinRemovalContext(final ParseContext parseContext) {
+      this.parseContext = parseContext;
+      this.opsToRemove = new HashMap<>();
+    }
+  }
 
   private boolean findParallelSemiJoinBranch(Operator<?> mapjoin, TableScanOperator bigTableTS,
                                              ParseContext parseContext,
@@ -1166,9 +1319,8 @@ public class TezCompiler extends TaskCompiler {
    */
   private void removeSemijoinsParallelToMapJoin(OptimizeTezProcContext procCtx)
           throws SemanticException {
-    if(!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION) ||
-            !procCtx.conf.getBoolVar(ConfVars.HIVECONVERTJOIN) ||
-            procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN)) {
+    if(!procCtx.conf.getBoolVar(ConfVars.HIVECONVERTJOIN) ||
+        procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION_FOR_MAPJOIN)) {
       // Not needed without semi-join reduction or mapjoins or when semijoins
       // are enabled for parallel mapjoins.
       return;
@@ -1376,11 +1528,6 @@ public class TezCompiler extends TaskCompiler {
 
   private void removeSemijoinOptimizationByBenefit(OptimizeTezProcContext procCtx)
       throws SemanticException {
-    if(!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
-      // Not needed without semi-join reduction
-      return;
-    }
-
     List<ReduceSinkOperator> semijoinRsToRemove = new ArrayList<ReduceSinkOperator>();
     Map<ReduceSinkOperator, SemiJoinBranchInfo> map = procCtx.parseContext.getRsToSemiJoinBranchInfo();
     double semijoinReductionThreshold = procCtx.conf.getFloatVar(
@@ -1437,11 +1584,6 @@ public class TezCompiler extends TaskCompiler {
 
   private void markSemiJoinForDPP(OptimizeTezProcContext procCtx)
           throws SemanticException {
-    if(!procCtx.conf.getBoolVar(ConfVars.TEZ_DYNAMIC_SEMIJOIN_REDUCTION)) {
-      // Not needed without semi-join reduction
-      return;
-    }
-
     // Stores the Tablescan operators processed to avoid redoing them.
     Map<TableScanOperator, TableScanOperator> tsOps = new HashMap<>();
     Map<ReduceSinkOperator, SemiJoinBranchInfo> map = procCtx.parseContext.getRsToSemiJoinBranchInfo();

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
index dec2d1e..1f533bc 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/SyntheticJoinPredicate.java
@@ -26,6 +26,12 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Stack;
 
+import org.apache.hadoop.hive.ql.exec.FilterOperator;
+import org.apache.hadoop.hive.ql.exec.GroupByOperator;
+import org.apache.hadoop.hive.ql.exec.OperatorUtils;
+import org.apache.hadoop.hive.ql.exec.SelectOperator;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -119,14 +125,20 @@ public class SyntheticJoinPredicate extends Transform {
   private static class SyntheticContext implements NodeProcessorCtx {
 
     ParseContext parseContext;
+    boolean extended;
 
     public SyntheticContext(ParseContext pCtx) {
       parseContext = pCtx;
+      extended = parseContext.getConf().getBoolVar(ConfVars.TEZ_DYNAMIC_PARTITION_PRUNING_EXTENDED);
     }
 
     public ParseContext getParseContext() {
       return parseContext;
     }
+
+    public boolean isExtended() {
+      return extended;
+    }
   }
 
   private static class JoinSynthetic implements NodeProcessor {
@@ -134,6 +146,8 @@ public class SyntheticJoinPredicate extends Transform {
     public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
 
+      SyntheticContext sCtx = (SyntheticContext) procCtx;
+
       @SuppressWarnings("unchecked")
       CommonJoinOperator<JoinDesc> join = (CommonJoinOperator<JoinDesc>) nd;
 
@@ -161,9 +175,6 @@ public class SyntheticJoinPredicate extends Transform {
           continue;
         }
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Synthetic predicate: " + srcPos + " --> " + targetPos);
-        }
         ReduceSinkOperator target = (ReduceSinkOperator) parents.get(targetPos);
         List<ExprNodeDesc> sourceKeys = source.getConf().getKeyCols();
         List<ExprNodeDesc> targetKeys = target.getConf().getKeyCols();
@@ -175,8 +186,10 @@ public class SyntheticJoinPredicate extends Transform {
         ExprNodeDesc syntheticExpr = null;
 
         for (int i = 0; i < sourceKeys.size(); ++i) {
-          List<ExprNodeDesc> inArgs = new ArrayList<ExprNodeDesc>();
-          inArgs.add(sourceKeys.get(i));
+          final ExprNodeDesc sourceKey = sourceKeys.get(i);
+
+          List<ExprNodeDesc> inArgs = new ArrayList<>();
+          inArgs.add(sourceKey);
 
           ExprNodeDynamicListDesc dynamicExpr =
               new ExprNodeDynamicListDesc(targetKeys.get(i).getTypeInfo(), target, i);
@@ -186,17 +199,36 @@ public class SyntheticJoinPredicate extends Transform {
           ExprNodeDesc syntheticInExpr =
               ExprNodeGenericFuncDesc.newInstance(FunctionRegistry.getFunctionInfo("in")
                   .getGenericUDF(), inArgs);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Synthetic predicate in " + join + ": " + srcPos + " --> " + targetPos + " (" + syntheticInExpr + ")");
+          }
 
+          List<ExprNodeDesc> andArgs = new ArrayList<>();
           if (syntheticExpr != null) {
-            List<ExprNodeDesc> andArgs = new ArrayList<ExprNodeDesc>();
             andArgs.add(syntheticExpr);
-            andArgs.add(syntheticInExpr);
+          }
+          andArgs.add(syntheticInExpr);
+
+          if(sCtx.isExtended()) {
+            // Backtrack
+            List<ExprNodeDesc> newExprs = createDerivatives(target.getParentOperators().get(0), targetKeys.get(i), sourceKey);
+            if (!newExprs.isEmpty()) {
+              if (LOG.isDebugEnabled()) {
+                for (ExprNodeDesc expr : newExprs) {
+                  LOG.debug("Additional synthetic predicate in " + join + ": " + srcPos + " --> " + targetPos + " (" + expr + ")");
+                }
+              }
+              andArgs.addAll(newExprs);
+            }
+          }
 
+          if (andArgs.size() < 2) {
+            syntheticExpr = syntheticInExpr;
+          } else {
+            // Create AND expression
             syntheticExpr =
                 ExprNodeGenericFuncDesc.newInstance(FunctionRegistry.getFunctionInfo("and")
                     .getGenericUDF(), andArgs);
-          } else {
-            syntheticExpr = syntheticInExpr;
           }
         }
 
@@ -241,6 +273,129 @@ public class SyntheticJoinPredicate extends Transform {
       }
       return result;
     }
+
+    private List<ExprNodeDesc> createDerivatives(final Operator<?> currentOp,
+        final ExprNodeDesc currentNode, final ExprNodeDesc sourceKey) throws SemanticException {
+      List<ExprNodeDesc> resultExprs = new ArrayList<>();
+      return createDerivatives(resultExprs, currentOp, currentNode, sourceKey) ? resultExprs : new ArrayList<>();
+    }
+
+    private boolean createDerivatives(final List<ExprNodeDesc> resultExprs, final Operator<?> op,
+        final ExprNodeDesc currentNode, final ExprNodeDesc sourceKey) throws SemanticException {
+      // 1. Obtain join operator upstream
+      Operator<?> currentOp = op;
+      while (!(currentOp instanceof CommonJoinOperator)) {
+        if (currentOp.getParentOperators() == null || currentOp.getParentOperators().size() != 1) {
+          // Cannot backtrack
+          currentOp = null;
+          break;
+        }
+        if (!(currentOp instanceof FilterOperator) &&
+            !(currentOp instanceof SelectOperator) &&
+            !(currentOp instanceof ReduceSinkOperator) &&
+            !(currentOp instanceof GroupByOperator)) {
+          // Operator not supported
+          currentOp = null;
+          break;
+        }
+        // Move the pointer
+        currentOp = currentOp.getParentOperators().get(0);
+      }
+      if (currentOp == null) {
+        // We did not find any join, we are done
+        return true;
+      }
+      CommonJoinOperator<JoinDesc> joinOp = (CommonJoinOperator) currentOp;
+
+      // 2. Backtrack expression to join output
+      final ExprNodeDesc joinExprNode = ExprNodeDescUtils.backtrack(currentNode, op, joinOp);
+      if (joinExprNode == null || !(joinExprNode instanceof ExprNodeColumnDesc)) {
+        // TODO: We can extend to other expression types
+        // We are done
+        return true;
+      }
+      final String columnRefJoinInput = ((ExprNodeColumnDesc)joinExprNode).getColumn();
+
+      // 3. Find input position in join for expression obtained
+      String columnOutputName = null;
+      for (Map.Entry<String, ExprNodeDesc> e : joinOp.getColumnExprMap().entrySet()) {
+        if (e.getValue() == joinExprNode) {
+          columnOutputName = e.getKey();
+          break;
+        }
+      }
+      if (columnOutputName == null) {
+        // Maybe the join is pruning columns, though it should not.
+        // In any case, we are done
+        return true;
+      }
+      final int srcPos = joinOp.getConf().getReversedExprs().get(columnOutputName);
+      final int[][] targets = getTargets(joinOp);
+      final ReduceSinkOperator rsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(srcPos);
+
+      // 4. Find expression in input RS operator.
+      final Operator<?> rsOpInput = rsOp.getParentOperators().get(0);
+      final ExprNodeDesc rsOpInputExprNode = rsOp.getColumnExprMap().get(columnRefJoinInput);
+      if (rsOpInputExprNode == null) {
+        // Unexpected, we just bail out and we do not infer additional predicates
+        return false;
+      }
+      int posInRSOpKeys = -1;
+      for (int i = 0; i < rsOp.getConf().getKeyCols().size(); i++) {
+        if (rsOpInputExprNode.isSame(rsOp.getConf().getKeyCols().get(i))) {
+          posInRSOpKeys = i;
+          break;
+        }
+      }
+
+      // 5. If it is part of the key, we can create a new semijoin.
+      // In addition, we can do the same for siblings
+      if (posInRSOpKeys >= 0) {
+        // We pass the tests, we add it to the args for the AND expression
+        addParentReduceSink(resultExprs, rsOp, posInRSOpKeys, sourceKey);
+        for (int targetPos: targets[srcPos]) {
+          if (srcPos == targetPos) {
+            continue;
+          }
+          final ReduceSinkOperator otherRsOp = (ReduceSinkOperator) joinOp.getParentOperators().get(targetPos);
+          final Operator<?> otherRsOpInput = otherRsOp.getParentOperators().get(0);
+          // We pass the tests, we add it to the args for the AND expression
+          addParentReduceSink(resultExprs, otherRsOp, posInRSOpKeys, sourceKey);
+          // We propagate to operator below
+          boolean success = createDerivatives(
+              resultExprs, otherRsOpInput, otherRsOp.getConf().getKeyCols().get(posInRSOpKeys), sourceKey);
+          if (!success) {
+            // Something went wrong, bail out
+            return false;
+          }
+        }
+      }
+
+      // 6. Whether it was part of the key or of the value, if we reach here, we can at least
+      // continue propagating to operators below
+      boolean success = createDerivatives(
+          resultExprs, rsOpInput, rsOpInputExprNode, sourceKey);
+      if (!success) {
+        // Something went wrong, bail out
+        return false;
+      }
+
+      // 7. We are done, success
+      return true;
+    }
+
+    private void addParentReduceSink(final List<ExprNodeDesc> andArgs, final ReduceSinkOperator rsOp,
+        final int keyIndex, final ExprNodeDesc sourceKey) throws SemanticException {
+      ExprNodeDynamicListDesc dynamicExpr =
+          new ExprNodeDynamicListDesc(rsOp.getConf().getKeyCols().get(keyIndex).getTypeInfo(), rsOp, keyIndex);
+      // Create synthetic IN expression
+      List<ExprNodeDesc> inArgs = new ArrayList<>();
+      inArgs.add(sourceKey);
+      inArgs.add(dynamicExpr);
+      ExprNodeDesc newNode = ExprNodeGenericFuncDesc.newInstance(
+          FunctionRegistry.getFunctionInfo("in").getGenericUDF(), inArgs);
+      andArgs.add(newNode);
+    }
   }
 
   private static class Vectors {
@@ -285,4 +440,5 @@ public class SyntheticJoinPredicate extends Transform {
       }
     }
   }
+
 }

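To make the intent of the extended synthetic predicates concrete, here is a hypothetical sketch; the tables and the query are illustrative only and do not come from the patch, while the three properties are the ones referenced in this commit. The join keys are chained transitively through equi-joins, which is the situation the backtracking logic above targets.

-- Hypothetical illustration only; none of these tables exist in the patch.
-- fact.key, dim1.key and dim2.key are chained through equi-joins, so a runtime
-- filter seeded by the selective predicate on dim2 can also be considered when
-- scanning fact, not just when scanning dim1.
create table fact (key string, val string) stored as orc;
create table dim1 (key string) stored as orc;
create table dim2 (key string, flag int) stored as orc;

set hive.tez.dynamic.partition.pruning=true;
set hive.tez.dynamic.semijoin.reduction=true;
set hive.tez.dynamic.partition.pruning.extended=true;

explain
select count(*)
from fact
join dim1 on fact.key = dim1.key
join dim2 on dim1.key = dim2.key
where dim2.flag = 1;

Roughly speaking, with the flag off the synthetic IN predicates are only created between direct join inputs; with it on, additional candidates can reach further up the plan, and branches that end up filtering the same table-scan column are then deduplicated by the redundant semijoin/DPP removal added to TezCompiler.
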
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw2.q b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw2.q
new file mode 100644
index 0000000..910119d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynamic_semijoin_reduction_sw2.q
@@ -0,0 +1,59 @@
+--! qt:dataset:srcpart
+--! qt:dataset:alltypesorc
+set hive.compute.query.using.stats=false;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+set hive.optimize.ppd=true;
+set hive.ppd.remove.duplicatefilters=true;
+set hive.tez.dynamic.partition.pruning=true;
+set hive.tez.dynamic.semijoin.reduction=true;
+set hive.optimize.metadataonly=false;
+set hive.optimize.index.filter=true;
+set hive.stats.autogather=true;
+set hive.tez.bigtable.minsize.semijoin.reduction=1;
+set hive.tez.min.bloom.filter.entries=1;
+set hive.stats.fetch.column.stats=true;
+set hive.cbo.enable=false;
+set hive.reorder.nway.joins=false;
+set hive.merge.nway.joins=false;
+
+-- Create Tables
+create table alltypesorc_int_n0 ( cint int, cstring string ) stored as ORC;
+create table srcpart_date_n6 (key string, value string) partitioned by (ds string ) stored as ORC;
+CREATE TABLE srcpart_small_n2(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC;
+
+-- Add Partitions
+alter table srcpart_date_n6 add partition (ds = "2008-04-08");
+alter table srcpart_date_n6 add partition (ds = "2008-04-09");
+
+alter table srcpart_small_n2 add partition (ds1 = "2008-04-08");
+alter table srcpart_small_n2 add partition (ds1 = "2008-04-09");
+
+-- Load
+insert overwrite table alltypesorc_int_n0 select cint, cstring1 from alltypesorc;
+insert overwrite table srcpart_date_n6 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08";
+insert overwrite table srcpart_date_n6 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09";
+insert overwrite table srcpart_small_n2 partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20;
+
+set hive.tez.dynamic.semijoin.reduction=false;
+
+analyze table alltypesorc_int_n0 compute statistics for columns;
+analyze table srcpart_date_n6 compute statistics for columns;
+analyze table srcpart_small_n2 compute statistics for columns;
+
+set hive.tez.dynamic.semijoin.reduction=true;
+EXPLAIN
+SELECT count(*)
+  FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-09") `srcpart_date_n6`
+  JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2`
+    ON (srcpart_date_n6.key = srcpart_small_n2.key1)
+  JOIN (
+    SELECT *
+    FROM (SELECT * FROM alltypesorc_int_n0 WHERE cint = 10) `alltypesorc_int_n0`
+    JOIN (SELECT * FROM srcpart_small_n2) `srcpart_small_n2`
+      ON (alltypesorc_int_n0.cstring = srcpart_small_n2.key1)) b
+    ON (srcpart_small_n2.key1 = b.cstring);
+
+drop table srcpart_date_n6;
+drop table srcpart_small_n2;
+drop table alltypesorc_int_n0;

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_sw2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_sw2.q.out b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_sw2.q.out
new file mode 100644
index 0000000..883bdd7
--- /dev/null
+++ b/ql/src/test/results/clientpositive/llap/dynamic_semijoin_reduction_sw2.q.out
@@ -0,0 +1,450 @@
+PREHOOK: query: create table alltypesorc_int_n0 ( cint int, cstring string ) stored as ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@alltypesorc_int_n0
+POSTHOOK: query: create table alltypesorc_int_n0 ( cint int, cstring string ) stored as ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@alltypesorc_int_n0
+PREHOOK: query: create table srcpart_date_n6 (key string, value string) partitioned by (ds string ) stored as ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_date_n6
+POSTHOOK: query: create table srcpart_date_n6 (key string, value string) partitioned by (ds string ) stored as ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_date_n6
+PREHOOK: query: CREATE TABLE srcpart_small_n2(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_small_n2
+POSTHOOK: query: CREATE TABLE srcpart_small_n2(key1 STRING, value1 STRING) partitioned by (ds1 string) STORED as ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_small_n2
+PREHOOK: query: alter table srcpart_date_n6 add partition (ds = "2008-04-08")
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@srcpart_date_n6
+POSTHOOK: query: alter table srcpart_date_n6 add partition (ds = "2008-04-08")
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@srcpart_date_n6
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-08
+PREHOOK: query: alter table srcpart_date_n6 add partition (ds = "2008-04-09")
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@srcpart_date_n6
+POSTHOOK: query: alter table srcpart_date_n6 add partition (ds = "2008-04-09")
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@srcpart_date_n6
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-09
+PREHOOK: query: alter table srcpart_small_n2 add partition (ds1 = "2008-04-08")
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@srcpart_small_n2
+POSTHOOK: query: alter table srcpart_small_n2 add partition (ds1 = "2008-04-08")
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@srcpart_small_n2
+POSTHOOK: Output: default@srcpart_small_n2@ds1=2008-04-08
+PREHOOK: query: alter table srcpart_small_n2 add partition (ds1 = "2008-04-09")
+PREHOOK: type: ALTERTABLE_ADDPARTS
+PREHOOK: Output: default@srcpart_small_n2
+POSTHOOK: query: alter table srcpart_small_n2 add partition (ds1 = "2008-04-09")
+POSTHOOK: type: ALTERTABLE_ADDPARTS
+POSTHOOK: Output: default@srcpart_small_n2
+POSTHOOK: Output: default@srcpart_small_n2@ds1=2008-04-09
+PREHOOK: query: insert overwrite table alltypesorc_int_n0 select cint, cstring1 from alltypesorc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@alltypesorc_int_n0
+POSTHOOK: query: insert overwrite table alltypesorc_int_n0 select cint, cstring1 from alltypesorc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@alltypesorc_int_n0
+POSTHOOK: Lineage: alltypesorc_int_n0.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: alltypesorc_int_n0.cstring SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: insert overwrite table srcpart_date_n6 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Output: default@srcpart_date_n6@ds=2008-04-08
+POSTHOOK: query: insert overwrite table srcpart_date_n6 partition (ds = "2008-04-08" ) select key, value from srcpart where ds = "2008-04-08"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-08
+POSTHOOK: Lineage: srcpart_date_n6 PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_date_n6 PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table srcpart_date_n6 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@srcpart_date_n6@ds=2008-04-09
+POSTHOOK: query: insert overwrite table srcpart_date_n6 partition (ds = "2008-04-09") select key, value from srcpart where ds = "2008-04-09"
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-09
+POSTHOOK: Lineage: srcpart_date_n6 PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_date_n6 PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: insert overwrite table srcpart_small_n2 partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: default@srcpart_small_n2@ds1=2008-04-09
+POSTHOOK: query: insert overwrite table srcpart_small_n2 partition (ds1 = "2008-04-09") select key, value from srcpart where ds = "2008-04-09" limit 20
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: default@srcpart_small_n2@ds1=2008-04-09
+POSTHOOK: Lineage: srcpart_small_n2 PARTITION(ds1=2008-04-09).key1 SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: srcpart_small_n2 PARTITION(ds1=2008-04-09).value1 SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: analyze table alltypesorc_int_n0 compute statistics for columns
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@alltypesorc_int_n0
+PREHOOK: Output: default@alltypesorc_int_n0
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table alltypesorc_int_n0 compute statistics for columns
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@alltypesorc_int_n0
+POSTHOOK: Output: default@alltypesorc_int_n0
+#### A masked pattern was here ####
+PREHOOK: query: analyze table srcpart_date_n6 compute statistics for columns
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@srcpart_date_n6
+PREHOOK: Input: default@srcpart_date_n6@ds=2008-04-08
+PREHOOK: Input: default@srcpart_date_n6@ds=2008-04-09
+PREHOOK: Output: default@srcpart_date_n6
+PREHOOK: Output: default@srcpart_date_n6@ds=2008-04-08
+PREHOOK: Output: default@srcpart_date_n6@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table srcpart_date_n6 compute statistics for columns
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@srcpart_date_n6
+POSTHOOK: Input: default@srcpart_date_n6@ds=2008-04-08
+POSTHOOK: Input: default@srcpart_date_n6@ds=2008-04-09
+POSTHOOK: Output: default@srcpart_date_n6
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-08
+POSTHOOK: Output: default@srcpart_date_n6@ds=2008-04-09
+#### A masked pattern was here ####
+PREHOOK: query: analyze table srcpart_small_n2 compute statistics for columns
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@srcpart_small_n2
+PREHOOK: Input: default@srcpart_small_n2@ds1=2008-04-08
+PREHOOK: Input: default@srcpart_small_n2@ds1=2008-04-09
+PREHOOK: Output: default@srcpart_small_n2
+PREHOOK: Output: default@srcpart_small_n2@ds1=2008-04-08
+PREHOOK: Output: default@srcpart_small_n2@ds1=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table srcpart_small_n2 compute statistics for columns
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@srcpart_small_n2
+POSTHOOK: Input: default@srcpart_small_n2@ds1=2008-04-08
+POSTHOOK: Input: default@srcpart_small_n2@ds1=2008-04-09
+POSTHOOK: Output: default@srcpart_small_n2
+POSTHOOK: Output: default@srcpart_small_n2@ds1=2008-04-08
+POSTHOOK: Output: default@srcpart_small_n2@ds1=2008-04-09
+#### A masked pattern was here ####
+PREHOOK: query: EXPLAIN
+SELECT count(*)
+  FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-09") `srcpart_date_n6`
+  JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2`
+    ON (srcpart_date_n6.key = srcpart_small_n2.key1)
+  JOIN (
+    SELECT *
+    FROM (SELECT * FROM alltypesorc_int_n0 WHERE cint = 10) `alltypesorc_int_n0`
+    JOIN (SELECT * FROM srcpart_small_n2) `srcpart_small_n2`
+      ON (alltypesorc_int_n0.cstring = srcpart_small_n2.key1)) b
+    ON (srcpart_small_n2.key1 = b.cstring)
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN
+SELECT count(*)
+  FROM (SELECT * FROM srcpart_date_n6 WHERE ds = "2008-04-09") `srcpart_date_n6`
+  JOIN (SELECT * FROM srcpart_small_n2 WHERE ds1 = "2008-04-08") `srcpart_small_n2`
+    ON (srcpart_date_n6.key = srcpart_small_n2.key1)
+  JOIN (
+    SELECT *
+    FROM (SELECT * FROM alltypesorc_int_n0 WHERE cint = 10) `alltypesorc_int_n0`
+    JOIN (SELECT * FROM srcpart_small_n2) `srcpart_small_n2`
+      ON (alltypesorc_int_n0.cstring = srcpart_small_n2.key1)) b
+    ON (srcpart_small_n2.key1 = b.cstring)
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Edges:
+        Map 1 <- Reducer 6 (BROADCAST_EDGE), Reducer 9 (BROADCAST_EDGE)
+        Map 11 <- Reducer 10 (BROADCAST_EDGE), Reducer 6 (BROADCAST_EDGE)
+        Reducer 10 <- Map 7 (CUSTOM_SIMPLE_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE)
+        Reducer 3 <- Reducer 2 (ONE_TO_ONE_EDGE), Reducer 8 (ONE_TO_ONE_EDGE)
+        Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
+        Reducer 6 <- Map 5 (CUSTOM_SIMPLE_EDGE)
+        Reducer 8 <- Map 11 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+        Reducer 9 <- Reducer 8 (CUSTOM_SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_date_n6
+                  filterExpr: (key is not null and (key BETWEEN DynamicValue(RS_20_srcpart_small_n2_key1_min) AND DynamicValue(RS_20_srcpart_small_n2_key1_max) and in_bloom_filter(key, DynamicValue(RS_20_srcpart_small_n2_key1_bloom_filter))) and (key BETWEEN DynamicValue(RS_25_alltypesorc_int_n0_cstring_min) AND DynamicValue(RS_25_alltypesorc_int_n0_cstring_max) and in_bloom_filter(key, DynamicValue(RS_25_alltypesorc_int_n0_cstring_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((key BETWEEN DynamicValue(RS_20_srcpart_small_n2_key1_min) AND DynamicValue(RS_20_srcpart_small_n2_key1_max) and in_bloom_filter(key, DynamicValue(RS_20_srcpart_small_n2_key1_bloom_filter))) and (key BETWEEN DynamicValue(RS_25_alltypesorc_int_n0_cstring_min) AND DynamicValue(RS_25_alltypesorc_int_n0_cstring_max) and in_bloom_filter(key, DynamicValue(RS_25_alltypesorc_int_n0_cstring_bloom_filter))) and key is not null) (type: boolean)
+                    Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: key (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 87000 Basic stats: COMPLETE Column stats: COMPLETE
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 11 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_small_n2
+                  filterExpr: (key1 is not null and (key1 BETWEEN DynamicValue(RS_12_alltypesorc_int_n0_cstring_min) AND DynamicValue(RS_12_alltypesorc_int_n0_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_12_alltypesorc_int_n0_cstring_bloom_filter))) and (key1 BETWEEN DynamicValue(RS_20_srcpart_small_n2_key1_min) AND DynamicValue(RS_20_srcpart_small_n2_key1_max) and in_bloom_filter(key1, DynamicValue(RS_20_srcpart_small_n2_key1_bloom_filter)))) (type: boolean)
+                  Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL
+                  Filter Operator
+                    predicate: ((key1 BETWEEN DynamicValue(RS_12_alltypesorc_int_n0_cstring_min) AND DynamicValue(RS_12_alltypesorc_int_n0_cstring_max) and in_bloom_filter(key1, DynamicValue(RS_12_alltypesorc_int_n0_cstring_bloom_filter))) and (key1 BETWEEN DynamicValue(RS_20_srcpart_small_n2_key1_min) AND DynamicValue(RS_20_srcpart_small_n2_key1_max) and in_bloom_filter(key1, DynamicValue(RS_20_srcpart_small_n2_key1_bloom_filter))) and key1 is not null) (type: boolean)
+                    Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 20 Data size: 1740 Basic stats: PARTIAL Column stats: PARTIAL
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 5 
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart_small_n2
+                  filterExpr: key1 is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 87 Basic stats: PARTIAL Column stats: COMPLETE
+                  Filter Operator
+                    predicate: key1 is not null (type: boolean)
+                    Statistics: Num rows: 1 Data size: 87 Basic stats: PARTIAL Column stats: COMPLETE
+                    Select Operator
+                      expressions: key1 (type: string)
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1 Data size: 87 Basic stats: PARTIAL Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1 Data size: 87 Basic stats: PARTIAL Column stats: COMPLETE
+                      Select Operator
+                        expressions: _col0 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 87 Basic stats: PARTIAL Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: COMPLETE
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Map 7 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc_int_n0
+                  filterExpr: ((cint = 10) and cstring is not null) (type: boolean)
+                  Statistics: Num rows: 12288 Data size: 899146 Basic stats: COMPLETE Column stats: COMPLETE
+                  Filter Operator
+                    predicate: ((cint = 10) and cstring is not null) (type: boolean)
+                    Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
+                    Select Operator
+                      expressions: cstring (type: string)
+                      outputColumnNames: _col1
+                      Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
+                      Reduce Output Operator
+                        key expressions: _col1 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col1 (type: string)
+                        Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE
+                      Select Operator
+                        expressions: _col1 (type: string)
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE
+                        Group By Operator
+                          aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=1)
+                          mode: hash
+                          outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                          Reduce Output Operator
+                            sort order: 
+                            Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                            value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+            Execution mode: vectorized, llap
+            LLAP IO: all inputs
+        Reducer 10 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=1)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 2 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col0 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col3
+                Statistics: Num rows: 1100 Data size: 95700 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col3 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col3 (type: string)
+                  Statistics: Num rows: 1100 Data size: 95700 Basic stats: PARTIAL Column stats: NONE
+        Reducer 3 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col3 (type: string)
+                  1 _col1 (type: string)
+                Statistics: Num rows: 1210 Data size: 105270 Basic stats: PARTIAL Column stats: NONE
+                Group By Operator
+                  aggregations: count()
+                  mode: hash
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
+                  Reduce Output Operator
+                    sort order: 
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
+                    value expressions: _col0 (type: bigint)
+        Reducer 4 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 6 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=1)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 639 Basic stats: PARTIAL Column stats: COMPLETE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 8 
+            Execution mode: llap
+            Reduce Operator Tree:
+              Merge Join Operator
+                condition map:
+                     Inner Join 0 to 1
+                keys:
+                  0 _col1 (type: string)
+                  1 _col0 (type: string)
+                outputColumnNames: _col1
+                Statistics: Num rows: 22 Data size: 1914 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col1 (type: string)
+                  sort order: +
+                  Map-reduce partition columns: _col1 (type: string)
+                  Statistics: Num rows: 22 Data size: 1914 Basic stats: PARTIAL Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 22 Data size: 1914 Basic stats: PARTIAL Column stats: NONE
+                  Group By Operator
+                    aggregations: min(_col0), max(_col0), bloom_filter(_col0, expectedEntries=22)
+                    mode: hash
+                    outputColumnNames: _col0, _col1, _col2
+                    Statistics: Num rows: 1 Data size: 552 Basic stats: PARTIAL Column stats: NONE
+                    Reduce Output Operator
+                      sort order: 
+                      Statistics: Num rows: 1 Data size: 552 Basic stats: PARTIAL Column stats: NONE
+                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+        Reducer 9 
+            Execution mode: vectorized, llap
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: min(VALUE._col0), max(VALUE._col1), bloom_filter(VALUE._col2, expectedEntries=22)
+                mode: final
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 552 Basic stats: PARTIAL Column stats: NONE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 552 Basic stats: PARTIAL Column stats: NONE
+                  value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: binary)
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: drop table srcpart_date_n6
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@srcpart_date_n6
+PREHOOK: Output: default@srcpart_date_n6
+POSTHOOK: query: drop table srcpart_date_n6
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@srcpart_date_n6
+POSTHOOK: Output: default@srcpart_date_n6
+PREHOOK: query: drop table srcpart_small_n2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@srcpart_small_n2
+PREHOOK: Output: default@srcpart_small_n2
+POSTHOOK: query: drop table srcpart_small_n2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@srcpart_small_n2
+POSTHOOK: Output: default@srcpart_small_n2
+PREHOOK: query: drop table alltypesorc_int_n0
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@alltypesorc_int_n0
+PREHOOK: Output: default@alltypesorc_int_n0
+POSTHOOK: query: drop table alltypesorc_int_n0
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@alltypesorc_int_n0
+POSTHOOK: Output: default@alltypesorc_int_n0

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
index f87fe36..6a2ae62 100644
--- a/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
+++ b/ql/src/test/results/clientpositive/llap/explainuser_1.q.out
@@ -5347,8 +5347,8 @@ Stage-0
     Stage-1
       Map 3 llap
       File Output Operator [FS_21]
-        Map Join Operator [MAPJOIN_67] (rows=2 width=404)
-          Conds:RS_16._col0=RS_17._col0(Inner),RS_17._col0=MAPJOIN_66._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+        Map Join Operator [MAPJOIN_71] (rows=2 width=404)
+          Conds:RS_16._col0=RS_17._col0(Inner),RS_17._col0=MAPJOIN_70._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
         <-Map 1 [BROADCAST_EDGE] llap
           BROADCAST [RS_16]
             PartitionCols:_col0
@@ -5367,7 +5367,7 @@ Stage-0
                 predicate:key is not null
                 TableScan [TS_3] (rows=1 width=368)
                   default@t2_n70,b,Tbl:COMPLETE,Col:NONE,Output:["key","val"]
-        <-Map Join Operator [MAPJOIN_66] (rows=1 width=404)
+        <-Map Join Operator [MAPJOIN_70] (rows=1 width=404)
             Conds:SEL_8._col0=RS_13._col0(Inner),Output:["_col0","_col1","_col2","_col3"]
           <-Map 4 [BROADCAST_EDGE] llap
             BROADCAST [RS_13]
@@ -5408,8 +5408,8 @@ Stage-0
     Stage-1
       Map 3 llap
       File Output Operator [FS_21]
-        Map Join Operator [MAPJOIN_67] (rows=2 width=404)
-          Conds:RS_16._col0=RS_17._col0(Inner),RS_17._col0=MAPJOIN_66._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+        Map Join Operator [MAPJOIN_71] (rows=2 width=404)
+          Conds:RS_16._col0=RS_17._col0(Inner),RS_17._col0=MAPJOIN_70._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
         <-Map 1 [BROADCAST_EDGE] llap
           BROADCAST [RS_16]
             PartitionCols:_col0
@@ -5428,7 +5428,7 @@ Stage-0
                 predicate:key is not null
                 TableScan [TS_3] (rows=1 width=368)
                   default@t2_n70,b,Tbl:COMPLETE,Col:NONE,Output:["key","val"]
-        <-Map Join Operator [MAPJOIN_66] (rows=1 width=404)
+        <-Map Join Operator [MAPJOIN_70] (rows=1 width=404)
             Conds:SEL_8._col0=RS_13._col0(Inner),Output:["_col0","_col1","_col2","_col3"]
           <-Map 4 [BROADCAST_EDGE] llap
             BROADCAST [RS_13]

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out
index 6987a96..74fc2e8 100644
--- a/ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/tez_fixed_bucket_pruning.q.out
@@ -424,7 +424,7 @@ POSTHOOK: type: ANALYZE_TABLE
 POSTHOOK: Input: default@l3_monthly_dw_dimplan
 POSTHOOK: Output: default@l3_monthly_dw_dimplan
 #### A masked pattern was here ####
-Warning: Shuffle Join MERGEJOIN[47][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT DW.PROJECT_OBJECT_ID, S1.PLAN_KEY as PLAN_KEY, S2.PROJECT_KEY AS PROJECT_KEY
 FROM l3_clarity__L3_SNAP_NUMBER_2018022300104 snap inner join
@@ -873,7 +873,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[47][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: SELECT DW.PROJECT_OBJECT_ID, S1.PLAN_KEY as PLAN_KEY, S2.PROJECT_KEY AS PROJECT_KEY
 FROM l3_clarity__L3_SNAP_NUMBER_2018022300104 snap inner join
 l3_clarity__L3_MONTHLY_DW_FACTPLAN_DW_STG_2018022300104_1 DW on 1=1
@@ -915,7 +915,7 @@ POSTHOOK: Input: default@l3_monthly_dw_dimplan
 7147200	NULL	27114
 7147200	NULL	27114
 7147200	NULL	27114
-Warning: Shuffle Join MERGEJOIN[47][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN EXTENDED
 SELECT DW.PROJECT_OBJECT_ID, S1.PLAN_KEY as PLAN_KEY, S2.PROJECT_KEY AS PROJECT_KEY
 FROM l3_clarity__L3_SNAP_NUMBER_2018022300104 snap inner join
@@ -1365,7 +1365,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink
 
-Warning: Shuffle Join MERGEJOIN[47][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[48][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: SELECT DW.PROJECT_OBJECT_ID, S1.PLAN_KEY as PLAN_KEY, S2.PROJECT_KEY AS PROJECT_KEY
 FROM l3_clarity__L3_SNAP_NUMBER_2018022300104 snap inner join
 l3_clarity__L3_MONTHLY_DW_FACTPLAN_DW_STG_2018022300104_1 DW on 1=1

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query1.q.out b/ql/src/test/results/clientpositive/perf/tez/query1.q.out
index 579940c..58c422d 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query1.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query1.q.out
@@ -63,10 +63,10 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_159]
-        Limit [LIM_158] (rows=100 width=860)
+      File Output Operator [FS_161]
+        Limit [LIM_160] (rows=100 width=860)
           Number of rows:100
-          Select Operator [SEL_157] (rows=32266667 width=860)
+          Select Operator [SEL_159] (rows=32266667 width=860)
             Output:["_col0"]
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_50]
@@ -74,96 +74,96 @@ Stage-0
                 Output:["_col0"]
                 Filter Operator [FIL_48] (rows=32266667 width=860)
                   predicate:(_col2 > _col7)
-                  Merge Join Operator [MERGEJOIN_132] (rows=96800003 width=860)
-                    Conds:RS_45._col1=RS_156._col1(Inner),Output:["_col2","_col6","_col7"]
+                  Merge Join Operator [MERGEJOIN_134] (rows=96800003 width=860)
+                    Conds:RS_45._col1=RS_158._col1(Inner),Output:["_col2","_col6","_col7"]
                   <-Reducer 5 [SIMPLE_EDGE]
                     SHUFFLE [RS_45]
                       PartitionCols:_col1
-                      Merge Join Operator [MERGEJOIN_130] (rows=88000001 width=860)
-                        Conds:RS_42._col0=RS_151._col0(Inner),Output:["_col1","_col2","_col6"]
+                      Merge Join Operator [MERGEJOIN_132] (rows=88000001 width=860)
+                        Conds:RS_42._col0=RS_153._col0(Inner),Output:["_col1","_col2","_col6"]
                       <-Map 12 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_151]
+                        SHUFFLE [RS_153]
                           PartitionCols:_col0
-                          Select Operator [SEL_150] (rows=80000000 width=860)
+                          Select Operator [SEL_152] (rows=80000000 width=860)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_149] (rows=80000000 width=860)
+                            Filter Operator [FIL_151] (rows=80000000 width=860)
                               predicate:c_customer_sk is not null
                               TableScan [TS_17] (rows=80000000 width=860)
                                 default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_customer_id"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_42]
                           PartitionCols:_col0
-                          Merge Join Operator [MERGEJOIN_129] (rows=34842647 width=77)
-                            Conds:RS_145._col1=RS_148._col0(Inner),Output:["_col0","_col1","_col2"]
+                          Merge Join Operator [MERGEJOIN_131] (rows=34842647 width=77)
+                            Conds:RS_147._col1=RS_150._col0(Inner),Output:["_col0","_col1","_col2"]
                           <-Map 11 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_148]
+                            SHUFFLE [RS_150]
                               PartitionCols:_col0
-                              Select Operator [SEL_147] (rows=852 width=1910)
+                              Select Operator [SEL_149] (rows=852 width=1910)
                                 Output:["_col0"]
-                                Filter Operator [FIL_146] (rows=852 width=1910)
+                                Filter Operator [FIL_148] (rows=852 width=1910)
                                   predicate:((s_state = 'NM') and s_store_sk is not null)
                                   TableScan [TS_14] (rows=1704 width=1910)
                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_state"]
                           <-Reducer 3 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_145]
+                            SHUFFLE [RS_147]
                               PartitionCols:_col1
-                              Select Operator [SEL_144] (rows=31675133 width=77)
+                              Select Operator [SEL_146] (rows=31675133 width=77)
                                 Output:["_col0","_col1","_col2"]
-                                Group By Operator [GBY_143] (rows=31675133 width=77)
+                                Group By Operator [GBY_145] (rows=31675133 width=77)
                                   Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
                                 <-Reducer 2 [SIMPLE_EDGE]
                                   SHUFFLE [RS_11]
                                     PartitionCols:_col0, _col1
                                     Group By Operator [GBY_10] (rows=63350266 width=77)
                                       Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col2, _col1
-                                      Merge Join Operator [MERGEJOIN_128] (rows=63350266 width=77)
-                                        Conds:RS_137._col0=RS_141._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      Merge Join Operator [MERGEJOIN_130] (rows=63350266 width=77)
+                                        Conds:RS_139._col0=RS_143._col0(Inner),Output:["_col1","_col2","_col3"]
                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_137]
+                                        SHUFFLE [RS_139]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_135] (rows=57591150 width=77)
+                                          Select Operator [SEL_137] (rows=57591150 width=77)
                                             Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_133] (rows=57591150 width=77)
+                                            Filter Operator [FIL_135] (rows=57591150 width=77)
                                               predicate:(sr_customer_sk is not null and sr_returned_date_sk is not null and sr_store_sk is not null)
                                               TableScan [TS_0] (rows=57591150 width=77)
                                                 default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_customer_sk","sr_store_sk","sr_fee"]
                                       <-Map 10 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_141]
+                                        SHUFFLE [RS_143]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_140] (rows=36524 width=1119)
+                                          Select Operator [SEL_142] (rows=36524 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_139] (rows=36524 width=1119)
+                                            Filter Operator [FIL_141] (rows=36524 width=1119)
                                               predicate:((d_year = 2000) and d_date_sk is not null)
                                               TableScan [TS_3] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
                   <-Reducer 9 [SIMPLE_EDGE] vectorized
-                    SHUFFLE [RS_156]
+                    SHUFFLE [RS_158]
                       PartitionCols:_col1
-                      Select Operator [SEL_155] (rows=15837566 width=77)
+                      Select Operator [SEL_157] (rows=15837566 width=77)
                         Output:["_col0","_col1"]
-                        Group By Operator [GBY_154] (rows=15837566 width=77)
+                        Group By Operator [GBY_156] (rows=15837566 width=77)
                           Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","count(_col2)"],keys:_col1
-                          Select Operator [SEL_153] (rows=31675133 width=77)
+                          Select Operator [SEL_155] (rows=31675133 width=77)
                             Output:["_col1","_col2"]
-                            Group By Operator [GBY_152] (rows=31675133 width=77)
+                            Group By Operator [GBY_154] (rows=31675133 width=77)
                               Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0, KEY._col1
                             <-Reducer 8 [SIMPLE_EDGE]
                               SHUFFLE [RS_31]
                                 PartitionCols:_col0
                                 Group By Operator [GBY_30] (rows=63350266 width=77)
                                   Output:["_col0","_col1","_col2"],aggregations:["sum(_col3)"],keys:_col2, _col1
-                                  Merge Join Operator [MERGEJOIN_131] (rows=63350266 width=77)
-                                    Conds:RS_138._col0=RS_142._col0(Inner),Output:["_col1","_col2","_col3"]
+                                  Merge Join Operator [MERGEJOIN_133] (rows=63350266 width=77)
+                                    Conds:RS_140._col0=RS_144._col0(Inner),Output:["_col1","_col2","_col3"]
                                   <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_138]
+                                    SHUFFLE [RS_140]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_136] (rows=57591150 width=77)
+                                      Select Operator [SEL_138] (rows=57591150 width=77)
                                         Output:["_col0","_col1","_col2","_col3"]
-                                        Filter Operator [FIL_134] (rows=57591150 width=77)
+                                        Filter Operator [FIL_136] (rows=57591150 width=77)
                                           predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
                                            Please refer to the previous TableScan [TS_0]
                                   <-Map 10 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_142]
+                                    SHUFFLE [RS_144]
                                       PartitionCols:_col0
-                                       Please refer to the previous Select Operator [SEL_140]
+                                       Please refer to the previous Select Operator [SEL_142]
 


[10/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
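For context on what the HIVE-20090 plan diffs in this message exercise: a semijoin reduction filter computes min/max/bloom_filter aggregates over the join key on the small side of a join and broadcasts them so the large-side table scan can be filtered at runtime; this is the in_bloom_filter(col, DynamicValue(...)) predicate visible in the filterExpr lines of the EXPLAIN output above. A minimal sketch against the tables created earlier in that golden output, assuming the standard hive.tez.dynamic.semijoin.reduction setting is enabled (this snippet is an illustration, not part of the commit's diff):

    -- Hypothetical repro: dynamic semijoin reduction must be enabled (default on Tez).
    SET hive.tez.dynamic.semijoin.reduction=true;

    EXPLAIN
    SELECT count(*)
    FROM srcpart_date_n6 d
    JOIN srcpart_small_n2 s ON (d.key = s.key1);

    -- Expected shape of the plan: a Group By building min(key1), max(key1) and a
    -- bloom_filter over srcpart_small_n2, broadcast to the scan of srcpart_date_n6,
    -- whose filterExpr then contains key BETWEEN DynamicValue(...) AND DynamicValue(...)
    -- and in_bloom_filter(key, DynamicValue(...)).

The operator-id renumbering seen in the diffs below (e.g. MAPJOIN_66 becoming MAPJOIN_70, SEL_157 becoming SEL_159) is a side effect of the optimizer discovering additional semijoin reduction opportunities and is expected golden-file churn rather than a plan regression.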
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query59.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query59.q.out b/ql/src/test/results/clientpositive/perf/tez/query59.q.out
index 6b2dcc3..29cf136 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query59.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query59.q.out
@@ -109,51 +109,51 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_229]
-        Limit [LIM_228] (rows=100 width=88)
+      File Output Operator [FS_235]
+        Limit [LIM_234] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_227] (rows=421657640 width=88)
+          Select Operator [SEL_233] (rows=421657640 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
           <-Reducer 6 [SIMPLE_EDGE]
             SHUFFLE [RS_60]
               Select Operator [SEL_59] (rows=421657640 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
-                Merge Join Operator [MERGEJOIN_180] (rows=421657640 width=88)
+                Merge Join Operator [MERGEJOIN_186] (rows=421657640 width=88)
                   Conds:RS_56._col2, _col1=RS_57._col1, (_col0 - 52)(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col12","_col13","_col14","_col15","_col16","_col17"]
                 <-Reducer 13 [SIMPLE_EDGE]
                   SHUFFLE [RS_57]
                     PartitionCols:_col1, (_col0 - 52)
                     Select Operator [SEL_55] (rows=383325119 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                      Merge Join Operator [MERGEJOIN_179] (rows=383325119 width=88)
-                        Conds:RS_52._col1=RS_216._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col11"]
+                      Merge Join Operator [MERGEJOIN_185] (rows=383325119 width=88)
+                        Conds:RS_52._col1=RS_222._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col11"]
                       <-Map 19 [SIMPLE_EDGE] vectorized
-                        PARTITION_ONLY_SHUFFLE [RS_216]
+                        PARTITION_ONLY_SHUFFLE [RS_222]
                           PartitionCols:_col0
-                          Select Operator [SEL_215] (rows=1704 width=1910)
+                          Select Operator [SEL_221] (rows=1704 width=1910)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_214] (rows=1704 width=1910)
+                            Filter Operator [FIL_220] (rows=1704 width=1910)
                               predicate:(s_store_id is not null and s_store_sk is not null)
                               TableScan [TS_46] (rows=1704 width=1910)
                                 default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id"]
                       <-Reducer 12 [SIMPLE_EDGE]
                         SHUFFLE [RS_52]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_178] (rows=348477374 width=88)
-                            Conds:RS_226._col0=RS_211._col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                          Merge Join Operator [MERGEJOIN_184] (rows=348477374 width=88)
+                            Conds:RS_232._col0=RS_217._col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                           <-Map 15 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_211]
+                            SHUFFLE [RS_217]
                               PartitionCols:_col1
-                              Select Operator [SEL_209] (rows=8116 width=1119)
+                              Select Operator [SEL_215] (rows=8116 width=1119)
                                 Output:["_col1"]
-                                Filter Operator [FIL_207] (rows=8116 width=1119)
+                                Filter Operator [FIL_213] (rows=8116 width=1119)
                                   predicate:(d_month_seq BETWEEN 1197 AND 1208 and d_week_seq is not null)
                                   TableScan [TS_15] (rows=73049 width=1119)
                                     default@date_dim,d,Tbl:COMPLETE,Col:NONE,Output:["d_month_seq","d_week_seq"]
                           <-Reducer 11 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_226]
+                            SHUFFLE [RS_232]
                               PartitionCols:_col0
-                              Group By Operator [GBY_225] (rows=316797606 width=88)
+                              Group By Operator [GBY_231] (rows=316797606 width=88)
                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)"],keys:KEY._col0, KEY._col1
                               <-Reducer 10 [SIMPLE_EDGE]
                                 SHUFFLE [RS_40]
@@ -162,81 +162,81 @@ Stage-0
                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(_col2)","sum(_col3)","sum(_col5)","sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col0, _col1
                                     Select Operator [SEL_37] (rows=633595212 width=88)
                                       Output:["_col0","_col1","_col2","_col3","_col5","_col6","_col7","_col8"]
-                                      Merge Join Operator [MERGEJOIN_177] (rows=633595212 width=88)
-                                        Conds:RS_224._col0=RS_185._col0(Inner),Output:["_col1","_col2","_col4","_col5"]
+                                      Merge Join Operator [MERGEJOIN_183] (rows=633595212 width=88)
+                                        Conds:RS_230._col0=RS_191._col0(Inner),Output:["_col1","_col2","_col4","_col5"]
                                       <-Map 8 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_185]
+                                        PARTITION_ONLY_SHUFFLE [RS_191]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_182] (rows=73049 width=1119)
+                                          Select Operator [SEL_188] (rows=73049 width=1119)
                                             Output:["_col0","_col1","_col2"]
-                                            Filter Operator [FIL_181] (rows=73049 width=1119)
+                                            Filter Operator [FIL_187] (rows=73049 width=1119)
                                               predicate:(d_date_sk is not null and d_week_seq is not null)
                                               TableScan [TS_3] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_week_seq","d_day_name"]
                                       <-Map 18 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_224]
+                                        SHUFFLE [RS_230]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_223] (rows=575995635 width=88)
+                                          Select Operator [SEL_229] (rows=575995635 width=88)
                                             Output:["_col0","_col1","_col2"]
-                                            Filter Operator [FIL_222] (rows=575995635 width=88)
+                                            Filter Operator [FIL_228] (rows=575995635 width=88)
                                               predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_35_date_dim_d_date_sk_min) AND DynamicValue(RS_35_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_35_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_53_store_s_store_sk_min) AND DynamicValue(RS_53_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_53_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
                                               TableScan [TS_28] (rows=575995635 width=88)
                                                 default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_sales_price"]
                                               <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_213]
-                                                  Group By Operator [GBY_212] (rows=1 width=12)
+                                                BROADCAST [RS_219]
+                                                  Group By Operator [GBY_218] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_190]
-                                                      Group By Operator [GBY_188] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_196]
+                                                      Group By Operator [GBY_194] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_186] (rows=73049 width=1119)
+                                                        Select Operator [SEL_192] (rows=73049 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_182]
+                                                           Please refer to the previous Select Operator [SEL_188]
                                               <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_221]
-                                                  Group By Operator [GBY_220] (rows=1 width=12)
+                                                BROADCAST [RS_227]
+                                                  Group By Operator [GBY_226] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 19 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_219]
-                                                      Group By Operator [GBY_218] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_225]
+                                                      Group By Operator [GBY_224] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_217] (rows=1704 width=1910)
+                                                        Select Operator [SEL_223] (rows=1704 width=1910)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_215]
+                                                           Please refer to the previous Select Operator [SEL_221]
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_56]
                     PartitionCols:_col2, _col1
                     Select Operator [SEL_27] (rows=383325119 width=88)
                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
-                      Merge Join Operator [MERGEJOIN_176] (rows=383325119 width=88)
-                        Conds:RS_24._col1=RS_195._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col12","_col13"]
+                      Merge Join Operator [MERGEJOIN_182] (rows=383325119 width=88)
+                        Conds:RS_24._col1=RS_201._col0(Inner),Output:["_col0","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col12","_col13"]
                       <-Map 16 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_195]
+                        SHUFFLE [RS_201]
                           PartitionCols:_col0
-                          Select Operator [SEL_194] (rows=1704 width=1910)
+                          Select Operator [SEL_200] (rows=1704 width=1910)
                             Output:["_col0","_col1","_col2"]
-                            Filter Operator [FIL_193] (rows=1704 width=1910)
+                            Filter Operator [FIL_199] (rows=1704 width=1910)
                               predicate:(s_store_id is not null and s_store_sk is not null)
                               TableScan [TS_18] (rows=1704 width=1910)
                                 default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk","s_store_id","s_store_name"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_175] (rows=348477374 width=88)
-                            Conds:RS_205._col0=RS_210._col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
+                          Merge Join Operator [MERGEJOIN_181] (rows=348477374 width=88)
+                            Conds:RS_211._col0=RS_216._col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
                           <-Map 15 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_210]
+                            SHUFFLE [RS_216]
                               PartitionCols:_col1
-                              Select Operator [SEL_208] (rows=8116 width=1119)
+                              Select Operator [SEL_214] (rows=8116 width=1119)
                                 Output:["_col1"]
-                                Filter Operator [FIL_206] (rows=8116 width=1119)
+                                Filter Operator [FIL_212] (rows=8116 width=1119)
                                   predicate:(d_month_seq BETWEEN 1185 AND 1196 and d_week_seq is not null)
                                    Please refer to the previous TableScan [TS_15]
                           <-Reducer 3 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_205]
+                            SHUFFLE [RS_211]
                               PartitionCols:_col0
-                              Group By Operator [GBY_204] (rows=316797606 width=88)
+                              Group By Operator [GBY_210] (rows=316797606 width=88)
                                 Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)"],keys:KEY._col0, KEY._col1
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_12]
@@ -245,41 +245,41 @@ Stage-0
                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)","sum(_col5)","sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col0, _col1
                                     Select Operator [SEL_9] (rows=633595212 width=88)
                                       Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"]
-                                      Merge Join Operator [MERGEJOIN_174] (rows=633595212 width=88)
-                                        Conds:RS_203._col0=RS_183._col0(Inner),Output:["_col1","_col2","_col4","_col5"]
+                                      Merge Join Operator [MERGEJOIN_180] (rows=633595212 width=88)
+                                        Conds:RS_209._col0=RS_189._col0(Inner),Output:["_col1","_col2","_col4","_col5"]
                                       <-Map 8 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_183]
+                                        PARTITION_ONLY_SHUFFLE [RS_189]
                                           PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_182]
+                                           Please refer to the previous Select Operator [SEL_188]
                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_203]
+                                        SHUFFLE [RS_209]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_202] (rows=575995635 width=88)
+                                          Select Operator [SEL_208] (rows=575995635 width=88)
                                             Output:["_col0","_col1","_col2"]
-                                            Filter Operator [FIL_201] (rows=575995635 width=88)
+                                            Filter Operator [FIL_207] (rows=575995635 width=88)
                                               predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_7_date_dim_d_date_sk_min) AND DynamicValue(RS_7_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_7_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_25_store_s_store_sk_min) AND DynamicValue(RS_25_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_25_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
                                               TableScan [TS_0] (rows=575995635 width=88)
                                                 default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_sales_price"]
                                               <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_200]
-                                                  Group By Operator [GBY_199] (rows=1 width=12)
+                                                BROADCAST [RS_206]
+                                                  Group By Operator [GBY_205] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_198]
-                                                      Group By Operator [GBY_197] (rows=1 width=12)
+                                                    SHUFFLE [RS_204]
+                                                      Group By Operator [GBY_203] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_196] (rows=1704 width=1910)
+                                                        Select Operator [SEL_202] (rows=1704 width=1910)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_194]
+                                                           Please refer to the previous Select Operator [SEL_200]
                                               <-Reducer 9 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_192]
-                                                  Group By Operator [GBY_191] (rows=1 width=12)
+                                                BROADCAST [RS_198]
+                                                  Group By Operator [GBY_197] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 8 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_189]
-                                                      Group By Operator [GBY_187] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_195]
+                                                      Group By Operator [GBY_193] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_184] (rows=73049 width=1119)
+                                                        Select Operator [SEL_190] (rows=73049 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_182]
+                                                           Please refer to the previous Select Operator [SEL_188]
 


[36/48] hive git commit: HIVE-20179: Some Tez jars are not on the classpath, so HS2 takes too long to start (Peter Vary, reviewed by Zoltan Haindrich)

Posted by se...@apache.org.
HIVE-20179: Some Tez jars are not on the classpath, so HS2 takes too long to start (Peter Vary, reviewed by Zoltan Haindrich)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4ab10801
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4ab10801
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4ab10801

Branch: refs/heads/master-txnstats
Commit: 4ab108013f764b5a04eead386f9f73e163f69e68
Parents: 85a3dd7
Author: Peter Vary <pv...@cloudera.com>
Authored: Tue Jul 17 09:55:02 2018 +0200
Committer: Peter Vary <pv...@cloudera.com>
Committed: Tue Jul 17 09:55:02 2018 +0200

----------------------------------------------------------------------
 .../apache/hive/service/server/HiveServer2.java | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4ab10801/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 2471883..432a341 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -738,16 +738,18 @@ public class HiveServer2 extends CompositeService {
       }
     }
 
-    if (!activePassiveHA) {
-      LOG.info("HS2 interactive HA not enabled. Starting tez sessions..");
-      try {
-        startOrReconnectTezSessions();
-      } catch (Exception e) {
-        LOG.error("Error starting  Tez sessions: ", e);
-        throw new ServiceException(e);
+    if (hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
+      if (!activePassiveHA) {
+        LOG.info("HS2 interactive HA not enabled. Starting tez sessions..");
+        try {
+          startOrReconnectTezSessions();
+        } catch (Exception e) {
+          LOG.error("Error starting  Tez sessions: ", e);
+          throw new ServiceException(e);
+        }
+      } else {
+        LOG.info("HS2 interactive HA enabled. Tez sessions will be started/reconnected by the leader.");
       }
-    } else {
-      LOG.info("HS2 interactive HA enabled. Tez sessions will be started/reconnected by the leader.");
     }
   }
 

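For readers of this archive, the patch above only wraps the existing Tez session startup in a check on hive.execution.engine, so an HS2 instance configured for a different execution engine no longer pays the Tez startup cost. Below is a minimal, self-contained sketch (not part of the commit) of the same gate; it assumes HiveConf and its ConfVars enum are on the classpath, and the activePassiveHA flag is only a stand-in for the HA state that HiveServer2 reads from its configuration.

    // Sketch only: mirrors the engine check added by HIVE-20179, not the actual HiveServer2 code.
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

    public class TezSessionGateSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        boolean activePassiveHA = false;  // stand-in for the HA flag HiveServer2 derives from its config
        // Only pay the Tez startup cost when Tez is actually the configured execution engine.
        if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
          if (!activePassiveHA) {
            System.out.println("Engine is tez, HA disabled: start or reconnect Tez sessions here.");
          } else {
            System.out.println("Engine is tez, HA enabled: the elected leader starts the sessions.");
          }
        } else {
          System.out.println("Engine is not tez: skip Tez session startup entirely.");
        }
      }
    }

The actual change keeps startOrReconnectTezSessions() and its logging intact and only adds the outer engine check, so an HS2 installation without the Tez jars on its classpath no longer stalls at startup when it is not using Tez.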

[44/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 5a22e4e,ae12471..292cf51
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@@ -27273,15 -26323,15 +27193,15 @@@ class WMFullResourcePlan 
          case 2:
            if ($ftype == TType::LST) {
              $this->pools = array();
-             $_size778 = 0;
-             $_etype781 = 0;
-             $xfer += $input->readListBegin($_etype781, $_size778);
-             for ($_i782 = 0; $_i782 < $_size778; ++$_i782)
 -            $_size763 = 0;
 -            $_etype766 = 0;
 -            $xfer += $input->readListBegin($_etype766, $_size763);
 -            for ($_i767 = 0; $_i767 < $_size763; ++$_i767)
++            $_size770 = 0;
++            $_etype773 = 0;
++            $xfer += $input->readListBegin($_etype773, $_size770);
++            for ($_i774 = 0; $_i774 < $_size770; ++$_i774)
              {
-               $elem783 = null;
-               $elem783 = new \metastore\WMPool();
-               $xfer += $elem783->read($input);
-               $this->pools []= $elem783;
 -              $elem768 = null;
 -              $elem768 = new \metastore\WMPool();
 -              $xfer += $elem768->read($input);
 -              $this->pools []= $elem768;
++              $elem775 = null;
++              $elem775 = new \metastore\WMPool();
++              $xfer += $elem775->read($input);
++              $this->pools []= $elem775;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27291,15 -26341,15 +27211,15 @@@
          case 3:
            if ($ftype == TType::LST) {
              $this->mappings = array();
-             $_size784 = 0;
-             $_etype787 = 0;
-             $xfer += $input->readListBegin($_etype787, $_size784);
-             for ($_i788 = 0; $_i788 < $_size784; ++$_i788)
 -            $_size769 = 0;
 -            $_etype772 = 0;
 -            $xfer += $input->readListBegin($_etype772, $_size769);
 -            for ($_i773 = 0; $_i773 < $_size769; ++$_i773)
++            $_size776 = 0;
++            $_etype779 = 0;
++            $xfer += $input->readListBegin($_etype779, $_size776);
++            for ($_i780 = 0; $_i780 < $_size776; ++$_i780)
              {
-               $elem789 = null;
-               $elem789 = new \metastore\WMMapping();
-               $xfer += $elem789->read($input);
-               $this->mappings []= $elem789;
 -              $elem774 = null;
 -              $elem774 = new \metastore\WMMapping();
 -              $xfer += $elem774->read($input);
 -              $this->mappings []= $elem774;
++              $elem781 = null;
++              $elem781 = new \metastore\WMMapping();
++              $xfer += $elem781->read($input);
++              $this->mappings []= $elem781;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27309,15 -26359,15 +27229,15 @@@
          case 4:
            if ($ftype == TType::LST) {
              $this->triggers = array();
-             $_size790 = 0;
-             $_etype793 = 0;
-             $xfer += $input->readListBegin($_etype793, $_size790);
-             for ($_i794 = 0; $_i794 < $_size790; ++$_i794)
 -            $_size775 = 0;
 -            $_etype778 = 0;
 -            $xfer += $input->readListBegin($_etype778, $_size775);
 -            for ($_i779 = 0; $_i779 < $_size775; ++$_i779)
++            $_size782 = 0;
++            $_etype785 = 0;
++            $xfer += $input->readListBegin($_etype785, $_size782);
++            for ($_i786 = 0; $_i786 < $_size782; ++$_i786)
              {
-               $elem795 = null;
-               $elem795 = new \metastore\WMTrigger();
-               $xfer += $elem795->read($input);
-               $this->triggers []= $elem795;
 -              $elem780 = null;
 -              $elem780 = new \metastore\WMTrigger();
 -              $xfer += $elem780->read($input);
 -              $this->triggers []= $elem780;
++              $elem787 = null;
++              $elem787 = new \metastore\WMTrigger();
++              $xfer += $elem787->read($input);
++              $this->triggers []= $elem787;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27327,15 -26377,15 +27247,15 @@@
          case 5:
            if ($ftype == TType::LST) {
              $this->poolTriggers = array();
-             $_size796 = 0;
-             $_etype799 = 0;
-             $xfer += $input->readListBegin($_etype799, $_size796);
-             for ($_i800 = 0; $_i800 < $_size796; ++$_i800)
 -            $_size781 = 0;
 -            $_etype784 = 0;
 -            $xfer += $input->readListBegin($_etype784, $_size781);
 -            for ($_i785 = 0; $_i785 < $_size781; ++$_i785)
++            $_size788 = 0;
++            $_etype791 = 0;
++            $xfer += $input->readListBegin($_etype791, $_size788);
++            for ($_i792 = 0; $_i792 < $_size788; ++$_i792)
              {
-               $elem801 = null;
-               $elem801 = new \metastore\WMPoolTrigger();
-               $xfer += $elem801->read($input);
-               $this->poolTriggers []= $elem801;
 -              $elem786 = null;
 -              $elem786 = new \metastore\WMPoolTrigger();
 -              $xfer += $elem786->read($input);
 -              $this->poolTriggers []= $elem786;
++              $elem793 = null;
++              $elem793 = new \metastore\WMPoolTrigger();
++              $xfer += $elem793->read($input);
++              $this->poolTriggers []= $elem793;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -27371,9 -26421,9 +27291,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->pools));
          {
-           foreach ($this->pools as $iter802)
 -          foreach ($this->pools as $iter787)
++          foreach ($this->pools as $iter794)
            {
-             $xfer += $iter802->write($output);
 -            $xfer += $iter787->write($output);
++            $xfer += $iter794->write($output);
            }
          }
          $output->writeListEnd();
@@@ -27388,9 -26438,9 +27308,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->mappings));
          {
-           foreach ($this->mappings as $iter803)
 -          foreach ($this->mappings as $iter788)
++          foreach ($this->mappings as $iter795)
            {
-             $xfer += $iter803->write($output);
 -            $xfer += $iter788->write($output);
++            $xfer += $iter795->write($output);
            }
          }
          $output->writeListEnd();
@@@ -27405,9 -26455,9 +27325,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->triggers));
          {
-           foreach ($this->triggers as $iter804)
 -          foreach ($this->triggers as $iter789)
++          foreach ($this->triggers as $iter796)
            {
-             $xfer += $iter804->write($output);
 -            $xfer += $iter789->write($output);
++            $xfer += $iter796->write($output);
            }
          }
          $output->writeListEnd();
@@@ -27422,9 -26472,9 +27342,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->poolTriggers));
          {
-           foreach ($this->poolTriggers as $iter805)
 -          foreach ($this->poolTriggers as $iter790)
++          foreach ($this->poolTriggers as $iter797)
            {
-             $xfer += $iter805->write($output);
 -            $xfer += $iter790->write($output);
++            $xfer += $iter797->write($output);
            }
          }
          $output->writeListEnd();
@@@ -27977,15 -27027,15 +27897,15 @@@ class WMGetAllResourcePlanResponse 
          case 1:
            if ($ftype == TType::LST) {
              $this->resourcePlans = array();
-             $_size806 = 0;
-             $_etype809 = 0;
-             $xfer += $input->readListBegin($_etype809, $_size806);
-             for ($_i810 = 0; $_i810 < $_size806; ++$_i810)
 -            $_size791 = 0;
 -            $_etype794 = 0;
 -            $xfer += $input->readListBegin($_etype794, $_size791);
 -            for ($_i795 = 0; $_i795 < $_size791; ++$_i795)
++            $_size798 = 0;
++            $_etype801 = 0;
++            $xfer += $input->readListBegin($_etype801, $_size798);
++            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
              {
-               $elem811 = null;
-               $elem811 = new \metastore\WMResourcePlan();
-               $xfer += $elem811->read($input);
-               $this->resourcePlans []= $elem811;
 -              $elem796 = null;
 -              $elem796 = new \metastore\WMResourcePlan();
 -              $xfer += $elem796->read($input);
 -              $this->resourcePlans []= $elem796;
++              $elem803 = null;
++              $elem803 = new \metastore\WMResourcePlan();
++              $xfer += $elem803->read($input);
++              $this->resourcePlans []= $elem803;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -28013,9 -27063,9 +27933,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->resourcePlans));
          {
-           foreach ($this->resourcePlans as $iter812)
 -          foreach ($this->resourcePlans as $iter797)
++          foreach ($this->resourcePlans as $iter804)
            {
-             $xfer += $iter812->write($output);
 -            $xfer += $iter797->write($output);
++            $xfer += $iter804->write($output);
            }
          }
          $output->writeListEnd();
@@@ -28421,14 -27471,14 +28341,14 @@@ class WMValidateResourcePlanResponse 
          case 1:
            if ($ftype == TType::LST) {
              $this->errors = array();
-             $_size813 = 0;
-             $_etype816 = 0;
-             $xfer += $input->readListBegin($_etype816, $_size813);
-             for ($_i817 = 0; $_i817 < $_size813; ++$_i817)
 -            $_size798 = 0;
 -            $_etype801 = 0;
 -            $xfer += $input->readListBegin($_etype801, $_size798);
 -            for ($_i802 = 0; $_i802 < $_size798; ++$_i802)
++            $_size805 = 0;
++            $_etype808 = 0;
++            $xfer += $input->readListBegin($_etype808, $_size805);
++            for ($_i809 = 0; $_i809 < $_size805; ++$_i809)
              {
-               $elem818 = null;
-               $xfer += $input->readString($elem818);
-               $this->errors []= $elem818;
 -              $elem803 = null;
 -              $xfer += $input->readString($elem803);
 -              $this->errors []= $elem803;
++              $elem810 = null;
++              $xfer += $input->readString($elem810);
++              $this->errors []= $elem810;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -28438,14 -27488,14 +28358,14 @@@
          case 2:
            if ($ftype == TType::LST) {
              $this->warnings = array();
-             $_size819 = 0;
-             $_etype822 = 0;
-             $xfer += $input->readListBegin($_etype822, $_size819);
-             for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
 -            $_size804 = 0;
 -            $_etype807 = 0;
 -            $xfer += $input->readListBegin($_etype807, $_size804);
 -            for ($_i808 = 0; $_i808 < $_size804; ++$_i808)
++            $_size811 = 0;
++            $_etype814 = 0;
++            $xfer += $input->readListBegin($_etype814, $_size811);
++            for ($_i815 = 0; $_i815 < $_size811; ++$_i815)
              {
-               $elem824 = null;
-               $xfer += $input->readString($elem824);
-               $this->warnings []= $elem824;
 -              $elem809 = null;
 -              $xfer += $input->readString($elem809);
 -              $this->warnings []= $elem809;
++              $elem816 = null;
++              $xfer += $input->readString($elem816);
++              $this->warnings []= $elem816;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -28473,9 -27523,9 +28393,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->errors));
          {
-           foreach ($this->errors as $iter825)
 -          foreach ($this->errors as $iter810)
++          foreach ($this->errors as $iter817)
            {
-             $xfer += $output->writeString($iter825);
 -            $xfer += $output->writeString($iter810);
++            $xfer += $output->writeString($iter817);
            }
          }
          $output->writeListEnd();
@@@ -28490,9 -27540,9 +28410,9 @@@
        {
          $output->writeListBegin(TType::STRING, count($this->warnings));
          {
-           foreach ($this->warnings as $iter826)
 -          foreach ($this->warnings as $iter811)
++          foreach ($this->warnings as $iter818)
            {
-             $xfer += $output->writeString($iter826);
 -            $xfer += $output->writeString($iter811);
++            $xfer += $output->writeString($iter818);
            }
          }
          $output->writeListEnd();
@@@ -29165,15 -28215,15 +29085,15 @@@ class WMGetTriggersForResourePlanRespon
          case 1:
            if ($ftype == TType::LST) {
              $this->triggers = array();
-             $_size827 = 0;
-             $_etype830 = 0;
-             $xfer += $input->readListBegin($_etype830, $_size827);
-             for ($_i831 = 0; $_i831 < $_size827; ++$_i831)
 -            $_size812 = 0;
 -            $_etype815 = 0;
 -            $xfer += $input->readListBegin($_etype815, $_size812);
 -            for ($_i816 = 0; $_i816 < $_size812; ++$_i816)
++            $_size819 = 0;
++            $_etype822 = 0;
++            $xfer += $input->readListBegin($_etype822, $_size819);
++            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
              {
-               $elem832 = null;
-               $elem832 = new \metastore\WMTrigger();
-               $xfer += $elem832->read($input);
-               $this->triggers []= $elem832;
 -              $elem817 = null;
 -              $elem817 = new \metastore\WMTrigger();
 -              $xfer += $elem817->read($input);
 -              $this->triggers []= $elem817;
++              $elem824 = null;
++              $elem824 = new \metastore\WMTrigger();
++              $xfer += $elem824->read($input);
++              $this->triggers []= $elem824;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -29201,9 -28251,9 +29121,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->triggers));
          {
-           foreach ($this->triggers as $iter833)
 -          foreach ($this->triggers as $iter818)
++          foreach ($this->triggers as $iter825)
            {
-             $xfer += $iter833->write($output);
 -            $xfer += $iter818->write($output);
++            $xfer += $iter825->write($output);
            }
          }
          $output->writeListEnd();
@@@ -30787,15 -29837,15 +30707,15 @@@ class SchemaVersion 
          case 4:
            if ($ftype == TType::LST) {
              $this->cols = array();
-             $_size834 = 0;
-             $_etype837 = 0;
-             $xfer += $input->readListBegin($_etype837, $_size834);
-             for ($_i838 = 0; $_i838 < $_size834; ++$_i838)
 -            $_size819 = 0;
 -            $_etype822 = 0;
 -            $xfer += $input->readListBegin($_etype822, $_size819);
 -            for ($_i823 = 0; $_i823 < $_size819; ++$_i823)
++            $_size826 = 0;
++            $_etype829 = 0;
++            $xfer += $input->readListBegin($_etype829, $_size826);
++            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
              {
-               $elem839 = null;
-               $elem839 = new \metastore\FieldSchema();
-               $xfer += $elem839->read($input);
-               $this->cols []= $elem839;
 -              $elem824 = null;
 -              $elem824 = new \metastore\FieldSchema();
 -              $xfer += $elem824->read($input);
 -              $this->cols []= $elem824;
++              $elem831 = null;
++              $elem831 = new \metastore\FieldSchema();
++              $xfer += $elem831->read($input);
++              $this->cols []= $elem831;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -30884,9 -29934,9 +30804,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->cols));
          {
-           foreach ($this->cols as $iter840)
 -          foreach ($this->cols as $iter825)
++          foreach ($this->cols as $iter832)
            {
-             $xfer += $iter840->write($output);
 -            $xfer += $iter825->write($output);
++            $xfer += $iter832->write($output);
            }
          }
          $output->writeListEnd();
@@@ -31208,15 -30258,15 +31128,15 @@@ class FindSchemasByColsResp 
          case 1:
            if ($ftype == TType::LST) {
              $this->schemaVersions = array();
-             $_size841 = 0;
-             $_etype844 = 0;
-             $xfer += $input->readListBegin($_etype844, $_size841);
-             for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
 -            $_size826 = 0;
 -            $_etype829 = 0;
 -            $xfer += $input->readListBegin($_etype829, $_size826);
 -            for ($_i830 = 0; $_i830 < $_size826; ++$_i830)
++            $_size833 = 0;
++            $_etype836 = 0;
++            $xfer += $input->readListBegin($_etype836, $_size833);
++            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
              {
-               $elem846 = null;
-               $elem846 = new \metastore\SchemaVersionDescriptor();
-               $xfer += $elem846->read($input);
-               $this->schemaVersions []= $elem846;
 -              $elem831 = null;
 -              $elem831 = new \metastore\SchemaVersionDescriptor();
 -              $xfer += $elem831->read($input);
 -              $this->schemaVersions []= $elem831;
++              $elem838 = null;
++              $elem838 = new \metastore\SchemaVersionDescriptor();
++              $xfer += $elem838->read($input);
++              $this->schemaVersions []= $elem838;
              }
              $xfer += $input->readListEnd();
            } else {
@@@ -31244,9 -30294,9 +31164,9 @@@
        {
          $output->writeListBegin(TType::STRUCT, count($this->schemaVersions));
          {
-           foreach ($this->schemaVersions as $iter847)
 -          foreach ($this->schemaVersions as $iter832)
++          foreach ($this->schemaVersions as $iter839)
            {
-             $xfer += $iter847->write($output);
 -            $xfer += $iter832->write($output);
++            $xfer += $iter839->write($output);
            }
          }
          $output->writeListEnd();
@@@ -31760,621 -30810,6 +31680,621 @@@ class GetRuntimeStatsRequest 
  
  }
  
 +class AlterPartitionsRequest {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var string
 +   */
 +  public $catName = null;
 +  /**
 +   * @var string
 +   */
 +  public $dbName = null;
 +  /**
 +   * @var string
 +   */
 +  public $tableName = null;
 +  /**
 +   * @var \metastore\Partition[]
 +   */
 +  public $partitions = null;
 +  /**
 +   * @var \metastore\EnvironmentContext
 +   */
 +  public $environmentContext = null;
 +  /**
 +   * @var int
 +   */
 +  public $txnId = -1;
 +  /**
 +   * @var int
 +   */
 +  public $writeId = -1;
 +  /**
 +   * @var string
 +   */
 +  public $validWriteIdList = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'catName',
 +          'type' => TType::STRING,
 +          ),
 +        2 => array(
 +          'var' => 'dbName',
 +          'type' => TType::STRING,
 +          ),
 +        3 => array(
 +          'var' => 'tableName',
 +          'type' => TType::STRING,
 +          ),
 +        4 => array(
 +          'var' => 'partitions',
 +          'type' => TType::LST,
 +          'etype' => TType::STRUCT,
 +          'elem' => array(
 +            'type' => TType::STRUCT,
 +            'class' => '\metastore\Partition',
 +            ),
 +          ),
 +        5 => array(
 +          'var' => 'environmentContext',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\EnvironmentContext',
 +          ),
 +        6 => array(
 +          'var' => 'txnId',
 +          'type' => TType::I64,
 +          ),
 +        7 => array(
 +          'var' => 'writeId',
 +          'type' => TType::I64,
 +          ),
 +        8 => array(
 +          'var' => 'validWriteIdList',
 +          'type' => TType::STRING,
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['catName'])) {
 +        $this->catName = $vals['catName'];
 +      }
 +      if (isset($vals['dbName'])) {
 +        $this->dbName = $vals['dbName'];
 +      }
 +      if (isset($vals['tableName'])) {
 +        $this->tableName = $vals['tableName'];
 +      }
 +      if (isset($vals['partitions'])) {
 +        $this->partitions = $vals['partitions'];
 +      }
 +      if (isset($vals['environmentContext'])) {
 +        $this->environmentContext = $vals['environmentContext'];
 +      }
 +      if (isset($vals['txnId'])) {
 +        $this->txnId = $vals['txnId'];
 +      }
 +      if (isset($vals['writeId'])) {
 +        $this->writeId = $vals['writeId'];
 +      }
 +      if (isset($vals['validWriteIdList'])) {
 +        $this->validWriteIdList = $vals['validWriteIdList'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'AlterPartitionsRequest';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->catName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->dbName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->tableName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 4:
 +          if ($ftype == TType::LST) {
 +            $this->partitions = array();
-             $_size848 = 0;
-             $_etype851 = 0;
-             $xfer += $input->readListBegin($_etype851, $_size848);
-             for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
++            $_size840 = 0;
++            $_etype843 = 0;
++            $xfer += $input->readListBegin($_etype843, $_size840);
++            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
 +            {
-               $elem853 = null;
-               $elem853 = new \metastore\Partition();
-               $xfer += $elem853->read($input);
-               $this->partitions []= $elem853;
++              $elem845 = null;
++              $elem845 = new \metastore\Partition();
++              $xfer += $elem845->read($input);
++              $this->partitions []= $elem845;
 +            }
 +            $xfer += $input->readListEnd();
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 5:
 +          if ($ftype == TType::STRUCT) {
 +            $this->environmentContext = new \metastore\EnvironmentContext();
 +            $xfer += $this->environmentContext->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 6:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->txnId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 7:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->writeId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 8:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->validWriteIdList);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('AlterPartitionsRequest');
 +    if ($this->catName !== null) {
 +      $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
 +      $xfer += $output->writeString($this->catName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->dbName !== null) {
 +      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2);
 +      $xfer += $output->writeString($this->dbName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tableName !== null) {
 +      $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3);
 +      $xfer += $output->writeString($this->tableName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->partitions !== null) {
 +      if (!is_array($this->partitions)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('partitions', TType::LST, 4);
 +      {
 +        $output->writeListBegin(TType::STRUCT, count($this->partitions));
 +        {
-           foreach ($this->partitions as $iter854)
++          foreach ($this->partitions as $iter846)
 +          {
-             $xfer += $iter854->write($output);
++            $xfer += $iter846->write($output);
 +          }
 +        }
 +        $output->writeListEnd();
 +      }
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->environmentContext !== null) {
 +      if (!is_object($this->environmentContext)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('environmentContext', TType::STRUCT, 5);
 +      $xfer += $this->environmentContext->write($output);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->txnId !== null) {
 +      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
 +      $xfer += $output->writeI64($this->txnId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->writeId !== null) {
 +      $xfer += $output->writeFieldBegin('writeId', TType::I64, 7);
 +      $xfer += $output->writeI64($this->writeId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->validWriteIdList !== null) {
 +      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
 +      $xfer += $output->writeString($this->validWriteIdList);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class AlterPartitionsResponse {
 +  static $_TSPEC;
 +
 +
 +  public function __construct() {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        );
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'AlterPartitionsResponse';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('AlterPartitionsResponse');
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class AlterTableRequest {
 +  static $_TSPEC;
 +
 +  /**
 +   * @var string
 +   */
 +  public $catName = null;
 +  /**
 +   * @var string
 +   */
 +  public $dbName = null;
 +  /**
 +   * @var string
 +   */
 +  public $tableName = null;
 +  /**
 +   * @var \metastore\Table
 +   */
 +  public $table = null;
 +  /**
 +   * @var \metastore\EnvironmentContext
 +   */
 +  public $environmentContext = null;
 +  /**
 +   * @var int
 +   */
 +  public $txnId = -1;
 +  /**
 +   * @var int
 +   */
 +  public $writeId = -1;
 +  /**
 +   * @var string
 +   */
 +  public $validWriteIdList = null;
 +
 +  public function __construct($vals=null) {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        1 => array(
 +          'var' => 'catName',
 +          'type' => TType::STRING,
 +          ),
 +        2 => array(
 +          'var' => 'dbName',
 +          'type' => TType::STRING,
 +          ),
 +        3 => array(
 +          'var' => 'tableName',
 +          'type' => TType::STRING,
 +          ),
 +        4 => array(
 +          'var' => 'table',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\Table',
 +          ),
 +        5 => array(
 +          'var' => 'environmentContext',
 +          'type' => TType::STRUCT,
 +          'class' => '\metastore\EnvironmentContext',
 +          ),
 +        6 => array(
 +          'var' => 'txnId',
 +          'type' => TType::I64,
 +          ),
 +        7 => array(
 +          'var' => 'writeId',
 +          'type' => TType::I64,
 +          ),
 +        8 => array(
 +          'var' => 'validWriteIdList',
 +          'type' => TType::STRING,
 +          ),
 +        );
 +    }
 +    if (is_array($vals)) {
 +      if (isset($vals['catName'])) {
 +        $this->catName = $vals['catName'];
 +      }
 +      if (isset($vals['dbName'])) {
 +        $this->dbName = $vals['dbName'];
 +      }
 +      if (isset($vals['tableName'])) {
 +        $this->tableName = $vals['tableName'];
 +      }
 +      if (isset($vals['table'])) {
 +        $this->table = $vals['table'];
 +      }
 +      if (isset($vals['environmentContext'])) {
 +        $this->environmentContext = $vals['environmentContext'];
 +      }
 +      if (isset($vals['txnId'])) {
 +        $this->txnId = $vals['txnId'];
 +      }
 +      if (isset($vals['writeId'])) {
 +        $this->writeId = $vals['writeId'];
 +      }
 +      if (isset($vals['validWriteIdList'])) {
 +        $this->validWriteIdList = $vals['validWriteIdList'];
 +      }
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'AlterTableRequest';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        case 1:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->catName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 2:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->dbName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 3:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->tableName);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 4:
 +          if ($ftype == TType::STRUCT) {
 +            $this->table = new \metastore\Table();
 +            $xfer += $this->table->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 5:
 +          if ($ftype == TType::STRUCT) {
 +            $this->environmentContext = new \metastore\EnvironmentContext();
 +            $xfer += $this->environmentContext->read($input);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 6:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->txnId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 7:
 +          if ($ftype == TType::I64) {
 +            $xfer += $input->readI64($this->writeId);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        case 8:
 +          if ($ftype == TType::STRING) {
 +            $xfer += $input->readString($this->validWriteIdList);
 +          } else {
 +            $xfer += $input->skip($ftype);
 +          }
 +          break;
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('AlterTableRequest');
 +    if ($this->catName !== null) {
 +      $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
 +      $xfer += $output->writeString($this->catName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->dbName !== null) {
 +      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2);
 +      $xfer += $output->writeString($this->dbName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->tableName !== null) {
 +      $xfer += $output->writeFieldBegin('tableName', TType::STRING, 3);
 +      $xfer += $output->writeString($this->tableName);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->table !== null) {
 +      if (!is_object($this->table)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('table', TType::STRUCT, 4);
 +      $xfer += $this->table->write($output);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->environmentContext !== null) {
 +      if (!is_object($this->environmentContext)) {
 +        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
 +      }
 +      $xfer += $output->writeFieldBegin('environmentContext', TType::STRUCT, 5);
 +      $xfer += $this->environmentContext->write($output);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->txnId !== null) {
 +      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
 +      $xfer += $output->writeI64($this->txnId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->writeId !== null) {
 +      $xfer += $output->writeFieldBegin('writeId', TType::I64, 7);
 +      $xfer += $output->writeI64($this->writeId);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    if ($this->validWriteIdList !== null) {
 +      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
 +      $xfer += $output->writeString($this->validWriteIdList);
 +      $xfer += $output->writeFieldEnd();
 +    }
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
 +class AlterTableResponse {
 +  static $_TSPEC;
 +
 +
 +  public function __construct() {
 +    if (!isset(self::$_TSPEC)) {
 +      self::$_TSPEC = array(
 +        );
 +    }
 +  }
 +
 +  public function getName() {
 +    return 'AlterTableResponse';
 +  }
 +
 +  public function read($input)
 +  {
 +    $xfer = 0;
 +    $fname = null;
 +    $ftype = 0;
 +    $fid = 0;
 +    $xfer += $input->readStructBegin($fname);
 +    while (true)
 +    {
 +      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
 +      if ($ftype == TType::STOP) {
 +        break;
 +      }
 +      switch ($fid)
 +      {
 +        default:
 +          $xfer += $input->skip($ftype);
 +          break;
 +      }
 +      $xfer += $input->readFieldEnd();
 +    }
 +    $xfer += $input->readStructEnd();
 +    return $xfer;
 +  }
 +
 +  public function write($output) {
 +    $xfer = 0;
 +    $xfer += $output->writeStructBegin('AlterTableResponse');
 +    $xfer += $output->writeFieldStop();
 +    $xfer += $output->writeStructEnd();
 +    return $xfer;
 +  }
 +
 +}
 +
  class MetaException extends TException {
    static $_TSPEC;
  

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------

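The bulk of this merge is regenerated Thrift code; the functional addition visible above is the new AlterPartitionsRequest/AlterPartitionsResponse and AlterTableRequest/AlterTableResponse structs, which carry the transactional fields txnId, writeId and validWriteIdList alongside the usual catalog, database and table identifiers. As a rough illustration only (not code from this patch), the matching Thrift-generated Java bean could be populated as below; the package and setter names are assumptions based on the standard Thrift Java generator and the field list shown in the PHP and Python diffs.

    // Sketch only: assumes the thrift-generated Java mirror of the PHP/Python structs shown above.
    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class AlterPartitionsRequestSketch {
      public static void main(String[] args) {
        Partition changed = new Partition();                        // a partition whose metadata was updated
        AlterPartitionsRequest req = new AlterPartitionsRequest();
        req.setDbName("default");                                   // field 2 of the struct
        req.setTableName("store_sales");                            // field 3
        req.setPartitions(Collections.singletonList(changed));      // field 4
        req.setWriteId(42L);                                        // field 7; defaults to -1 when unused
        req.setValidWriteIdList("placeholder-valid-write-id-list"); // field 8; real callers pass a serialized ValidWriteIdList
        System.out.println(req);                                    // Thrift beans provide a readable toString()
      }
    }

In practice the struct would travel over the new alter-partitions Thrift call that this branch introduces; the client entry point itself is not shown in this excerpt of the merge.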

[42/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 489beed,1285c08..b7fe6ba
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@@ -19140,44 -18552,44 +19106,44 @@@ class WMFullResourcePlan
        elif fid == 2:
          if ftype == TType.LIST:
            self.pools = []
-           (_etype777, _size774) = iprot.readListBegin()
-           for _i778 in xrange(_size774):
-             _elem779 = WMPool()
-             _elem779.read(iprot)
-             self.pools.append(_elem779)
 -          (_etype763, _size760) = iprot.readListBegin()
 -          for _i764 in xrange(_size760):
 -            _elem765 = WMPool()
 -            _elem765.read(iprot)
 -            self.pools.append(_elem765)
++          (_etype770, _size767) = iprot.readListBegin()
++          for _i771 in xrange(_size767):
++            _elem772 = WMPool()
++            _elem772.read(iprot)
++            self.pools.append(_elem772)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 3:
          if ftype == TType.LIST:
            self.mappings = []
-           (_etype783, _size780) = iprot.readListBegin()
-           for _i784 in xrange(_size780):
-             _elem785 = WMMapping()
-             _elem785.read(iprot)
-             self.mappings.append(_elem785)
 -          (_etype769, _size766) = iprot.readListBegin()
 -          for _i770 in xrange(_size766):
 -            _elem771 = WMMapping()
 -            _elem771.read(iprot)
 -            self.mappings.append(_elem771)
++          (_etype776, _size773) = iprot.readListBegin()
++          for _i777 in xrange(_size773):
++            _elem778 = WMMapping()
++            _elem778.read(iprot)
++            self.mappings.append(_elem778)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 4:
          if ftype == TType.LIST:
            self.triggers = []
-           (_etype789, _size786) = iprot.readListBegin()
-           for _i790 in xrange(_size786):
-             _elem791 = WMTrigger()
-             _elem791.read(iprot)
-             self.triggers.append(_elem791)
 -          (_etype775, _size772) = iprot.readListBegin()
 -          for _i776 in xrange(_size772):
 -            _elem777 = WMTrigger()
 -            _elem777.read(iprot)
 -            self.triggers.append(_elem777)
++          (_etype782, _size779) = iprot.readListBegin()
++          for _i783 in xrange(_size779):
++            _elem784 = WMTrigger()
++            _elem784.read(iprot)
++            self.triggers.append(_elem784)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 5:
          if ftype == TType.LIST:
            self.poolTriggers = []
-           (_etype795, _size792) = iprot.readListBegin()
-           for _i796 in xrange(_size792):
-             _elem797 = WMPoolTrigger()
-             _elem797.read(iprot)
-             self.poolTriggers.append(_elem797)
 -          (_etype781, _size778) = iprot.readListBegin()
 -          for _i782 in xrange(_size778):
 -            _elem783 = WMPoolTrigger()
 -            _elem783.read(iprot)
 -            self.poolTriggers.append(_elem783)
++          (_etype788, _size785) = iprot.readListBegin()
++          for _i789 in xrange(_size785):
++            _elem790 = WMPoolTrigger()
++            _elem790.read(iprot)
++            self.poolTriggers.append(_elem790)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -19198,29 -18610,29 +19164,29 @@@
      if self.pools is not None:
        oprot.writeFieldBegin('pools', TType.LIST, 2)
        oprot.writeListBegin(TType.STRUCT, len(self.pools))
-       for iter798 in self.pools:
-         iter798.write(oprot)
 -      for iter784 in self.pools:
 -        iter784.write(oprot)
++      for iter791 in self.pools:
++        iter791.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.mappings is not None:
        oprot.writeFieldBegin('mappings', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.mappings))
-       for iter799 in self.mappings:
-         iter799.write(oprot)
 -      for iter785 in self.mappings:
 -        iter785.write(oprot)
++      for iter792 in self.mappings:
++        iter792.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.triggers is not None:
        oprot.writeFieldBegin('triggers', TType.LIST, 4)
        oprot.writeListBegin(TType.STRUCT, len(self.triggers))
-       for iter800 in self.triggers:
-         iter800.write(oprot)
 -      for iter786 in self.triggers:
 -        iter786.write(oprot)
++      for iter793 in self.triggers:
++        iter793.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.poolTriggers is not None:
        oprot.writeFieldBegin('poolTriggers', TType.LIST, 5)
        oprot.writeListBegin(TType.STRUCT, len(self.poolTriggers))
-       for iter801 in self.poolTriggers:
-         iter801.write(oprot)
 -      for iter787 in self.poolTriggers:
 -        iter787.write(oprot)
++      for iter794 in self.poolTriggers:
++        iter794.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -19694,11 -19106,11 +19660,11 @@@ class WMGetAllResourcePlanResponse
        if fid == 1:
          if ftype == TType.LIST:
            self.resourcePlans = []
-           (_etype805, _size802) = iprot.readListBegin()
-           for _i806 in xrange(_size802):
-             _elem807 = WMResourcePlan()
-             _elem807.read(iprot)
-             self.resourcePlans.append(_elem807)
 -          (_etype791, _size788) = iprot.readListBegin()
 -          for _i792 in xrange(_size788):
 -            _elem793 = WMResourcePlan()
 -            _elem793.read(iprot)
 -            self.resourcePlans.append(_elem793)
++          (_etype798, _size795) = iprot.readListBegin()
++          for _i799 in xrange(_size795):
++            _elem800 = WMResourcePlan()
++            _elem800.read(iprot)
++            self.resourcePlans.append(_elem800)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -19715,8 -19127,8 +19681,8 @@@
      if self.resourcePlans is not None:
        oprot.writeFieldBegin('resourcePlans', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.resourcePlans))
-       for iter808 in self.resourcePlans:
-         iter808.write(oprot)
 -      for iter794 in self.resourcePlans:
 -        iter794.write(oprot)
++      for iter801 in self.resourcePlans:
++        iter801.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -20020,20 -19432,20 +19986,20 @@@ class WMValidateResourcePlanResponse
        if fid == 1:
          if ftype == TType.LIST:
            self.errors = []
-           (_etype812, _size809) = iprot.readListBegin()
-           for _i813 in xrange(_size809):
-             _elem814 = iprot.readString()
-             self.errors.append(_elem814)
 -          (_etype798, _size795) = iprot.readListBegin()
 -          for _i799 in xrange(_size795):
 -            _elem800 = iprot.readString()
 -            self.errors.append(_elem800)
++          (_etype805, _size802) = iprot.readListBegin()
++          for _i806 in xrange(_size802):
++            _elem807 = iprot.readString()
++            self.errors.append(_elem807)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 2:
          if ftype == TType.LIST:
            self.warnings = []
-           (_etype818, _size815) = iprot.readListBegin()
-           for _i819 in xrange(_size815):
-             _elem820 = iprot.readString()
-             self.warnings.append(_elem820)
 -          (_etype804, _size801) = iprot.readListBegin()
 -          for _i805 in xrange(_size801):
 -            _elem806 = iprot.readString()
 -            self.warnings.append(_elem806)
++          (_etype811, _size808) = iprot.readListBegin()
++          for _i812 in xrange(_size808):
++            _elem813 = iprot.readString()
++            self.warnings.append(_elem813)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20050,15 -19462,15 +20016,15 @@@
      if self.errors is not None:
        oprot.writeFieldBegin('errors', TType.LIST, 1)
        oprot.writeListBegin(TType.STRING, len(self.errors))
-       for iter821 in self.errors:
-         oprot.writeString(iter821)
 -      for iter807 in self.errors:
 -        oprot.writeString(iter807)
++      for iter814 in self.errors:
++        oprot.writeString(iter814)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.warnings is not None:
        oprot.writeFieldBegin('warnings', TType.LIST, 2)
        oprot.writeListBegin(TType.STRING, len(self.warnings))
-       for iter822 in self.warnings:
-         oprot.writeString(iter822)
 -      for iter808 in self.warnings:
 -        oprot.writeString(iter808)
++      for iter815 in self.warnings:
++        oprot.writeString(iter815)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -20635,11 -20047,11 +20601,11 @@@ class WMGetTriggersForResourePlanRespon
        if fid == 1:
          if ftype == TType.LIST:
            self.triggers = []
-           (_etype826, _size823) = iprot.readListBegin()
-           for _i827 in xrange(_size823):
-             _elem828 = WMTrigger()
-             _elem828.read(iprot)
-             self.triggers.append(_elem828)
 -          (_etype812, _size809) = iprot.readListBegin()
 -          for _i813 in xrange(_size809):
 -            _elem814 = WMTrigger()
 -            _elem814.read(iprot)
 -            self.triggers.append(_elem814)
++          (_etype819, _size816) = iprot.readListBegin()
++          for _i820 in xrange(_size816):
++            _elem821 = WMTrigger()
++            _elem821.read(iprot)
++            self.triggers.append(_elem821)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20656,8 -20068,8 +20622,8 @@@
      if self.triggers is not None:
        oprot.writeFieldBegin('triggers', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.triggers))
-       for iter829 in self.triggers:
-         iter829.write(oprot)
 -      for iter815 in self.triggers:
 -        iter815.write(oprot)
++      for iter822 in self.triggers:
++        iter822.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -21841,11 -21253,11 +21807,11 @@@ class SchemaVersion
        elif fid == 4:
          if ftype == TType.LIST:
            self.cols = []
-           (_etype833, _size830) = iprot.readListBegin()
-           for _i834 in xrange(_size830):
-             _elem835 = FieldSchema()
-             _elem835.read(iprot)
-             self.cols.append(_elem835)
 -          (_etype819, _size816) = iprot.readListBegin()
 -          for _i820 in xrange(_size816):
 -            _elem821 = FieldSchema()
 -            _elem821.read(iprot)
 -            self.cols.append(_elem821)
++          (_etype826, _size823) = iprot.readListBegin()
++          for _i827 in xrange(_size823):
++            _elem828 = FieldSchema()
++            _elem828.read(iprot)
++            self.cols.append(_elem828)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21905,8 -21317,8 +21871,8 @@@
      if self.cols is not None:
        oprot.writeFieldBegin('cols', TType.LIST, 4)
        oprot.writeListBegin(TType.STRUCT, len(self.cols))
-       for iter836 in self.cols:
-         iter836.write(oprot)
 -      for iter822 in self.cols:
 -        iter822.write(oprot)
++      for iter829 in self.cols:
++        iter829.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.state is not None:
@@@ -22161,11 -21573,11 +22127,11 @@@ class FindSchemasByColsResp
        if fid == 1:
          if ftype == TType.LIST:
            self.schemaVersions = []
-           (_etype840, _size837) = iprot.readListBegin()
-           for _i841 in xrange(_size837):
-             _elem842 = SchemaVersionDescriptor()
-             _elem842.read(iprot)
-             self.schemaVersions.append(_elem842)
 -          (_etype826, _size823) = iprot.readListBegin()
 -          for _i827 in xrange(_size823):
 -            _elem828 = SchemaVersionDescriptor()
 -            _elem828.read(iprot)
 -            self.schemaVersions.append(_elem828)
++          (_etype833, _size830) = iprot.readListBegin()
++          for _i834 in xrange(_size830):
++            _elem835 = SchemaVersionDescriptor()
++            _elem835.read(iprot)
++            self.schemaVersions.append(_elem835)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22182,8 -21594,8 +22148,8 @@@
      if self.schemaVersions is not None:
        oprot.writeFieldBegin('schemaVersions', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.schemaVersions))
-       for iter843 in self.schemaVersions:
-         iter843.write(oprot)
 -      for iter829 in self.schemaVersions:
 -        iter829.write(oprot)
++      for iter836 in self.schemaVersions:
++        iter836.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -22597,434 -22009,6 +22563,434 @@@ class GetRuntimeStatsRequest
      value = (value * 31) ^ hash(self.maxWeight)
      value = (value * 31) ^ hash(self.maxCreateTime)
      return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class AlterPartitionsRequest:
 +  """
 +  Attributes:
 +   - catName
 +   - dbName
 +   - tableName
 +   - partitions
 +   - environmentContext
 +   - txnId
 +   - writeId
 +   - validWriteIdList
 +  """
 +
 +  thrift_spec = (
 +    None, # 0
 +    (1, TType.STRING, 'catName', None, None, ), # 1
 +    (2, TType.STRING, 'dbName', None, None, ), # 2
 +    (3, TType.STRING, 'tableName', None, None, ), # 3
 +    (4, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 4
 +    (5, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 5
 +    (6, TType.I64, 'txnId', None, -1, ), # 6
 +    (7, TType.I64, 'writeId', None, -1, ), # 7
 +    (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
 +  )
 +
 +  def __init__(self, catName=None, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[6][4], writeId=thrift_spec[7][4], validWriteIdList=None,):
 +    self.catName = catName
 +    self.dbName = dbName
 +    self.tableName = tableName
 +    self.partitions = partitions
 +    self.environmentContext = environmentContext
 +    self.txnId = txnId
 +    self.writeId = writeId
 +    self.validWriteIdList = validWriteIdList
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 1:
 +        if ftype == TType.STRING:
 +          self.catName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 2:
 +        if ftype == TType.STRING:
 +          self.dbName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 3:
 +        if ftype == TType.STRING:
 +          self.tableName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 4:
 +        if ftype == TType.LIST:
 +          self.partitions = []
-           (_etype847, _size844) = iprot.readListBegin()
-           for _i848 in xrange(_size844):
-             _elem849 = Partition()
-             _elem849.read(iprot)
-             self.partitions.append(_elem849)
++          (_etype840, _size837) = iprot.readListBegin()
++          for _i841 in xrange(_size837):
++            _elem842 = Partition()
++            _elem842.read(iprot)
++            self.partitions.append(_elem842)
 +          iprot.readListEnd()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 5:
 +        if ftype == TType.STRUCT:
 +          self.environmentContext = EnvironmentContext()
 +          self.environmentContext.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 6:
 +        if ftype == TType.I64:
 +          self.txnId = iprot.readI64()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 7:
 +        if ftype == TType.I64:
 +          self.writeId = iprot.readI64()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 8:
 +        if ftype == TType.STRING:
 +          self.validWriteIdList = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('AlterPartitionsRequest')
 +    if self.catName is not None:
 +      oprot.writeFieldBegin('catName', TType.STRING, 1)
 +      oprot.writeString(self.catName)
 +      oprot.writeFieldEnd()
 +    if self.dbName is not None:
 +      oprot.writeFieldBegin('dbName', TType.STRING, 2)
 +      oprot.writeString(self.dbName)
 +      oprot.writeFieldEnd()
 +    if self.tableName is not None:
 +      oprot.writeFieldBegin('tableName', TType.STRING, 3)
 +      oprot.writeString(self.tableName)
 +      oprot.writeFieldEnd()
 +    if self.partitions is not None:
 +      oprot.writeFieldBegin('partitions', TType.LIST, 4)
 +      oprot.writeListBegin(TType.STRUCT, len(self.partitions))
-       for iter850 in self.partitions:
-         iter850.write(oprot)
++      for iter843 in self.partitions:
++        iter843.write(oprot)
 +      oprot.writeListEnd()
 +      oprot.writeFieldEnd()
 +    if self.environmentContext is not None:
 +      oprot.writeFieldBegin('environmentContext', TType.STRUCT, 5)
 +      self.environmentContext.write(oprot)
 +      oprot.writeFieldEnd()
 +    if self.txnId is not None:
 +      oprot.writeFieldBegin('txnId', TType.I64, 6)
 +      oprot.writeI64(self.txnId)
 +      oprot.writeFieldEnd()
 +    if self.writeId is not None:
 +      oprot.writeFieldBegin('writeId', TType.I64, 7)
 +      oprot.writeI64(self.writeId)
 +      oprot.writeFieldEnd()
 +    if self.validWriteIdList is not None:
 +      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
 +      oprot.writeString(self.validWriteIdList)
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    if self.dbName is None:
 +      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
 +    if self.tableName is None:
 +      raise TProtocol.TProtocolException(message='Required field tableName is unset!')
 +    if self.partitions is None:
 +      raise TProtocol.TProtocolException(message='Required field partitions is unset!')
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.catName)
 +    value = (value * 31) ^ hash(self.dbName)
 +    value = (value * 31) ^ hash(self.tableName)
 +    value = (value * 31) ^ hash(self.partitions)
 +    value = (value * 31) ^ hash(self.environmentContext)
 +    value = (value * 31) ^ hash(self.txnId)
 +    value = (value * 31) ^ hash(self.writeId)
 +    value = (value * 31) ^ hash(self.validWriteIdList)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class AlterPartitionsResponse:
 +
 +  thrift_spec = (
 +  )
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('AlterPartitionsResponse')
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class AlterTableRequest:
 +  """
 +  Attributes:
 +   - catName
 +   - dbName
 +   - tableName
 +   - table
 +   - environmentContext
 +   - txnId
 +   - writeId
 +   - validWriteIdList
 +  """
 +
 +  thrift_spec = (
 +    None, # 0
 +    (1, TType.STRING, 'catName', None, None, ), # 1
 +    (2, TType.STRING, 'dbName', None, None, ), # 2
 +    (3, TType.STRING, 'tableName', None, None, ), # 3
 +    (4, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 4
 +    (5, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 5
 +    (6, TType.I64, 'txnId', None, -1, ), # 6
 +    (7, TType.I64, 'writeId', None, -1, ), # 7
 +    (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
 +  )
 +
 +  def __init__(self, catName=None, dbName=None, tableName=None, table=None, environmentContext=None, txnId=thrift_spec[6][4], writeId=thrift_spec[7][4], validWriteIdList=None,):
 +    self.catName = catName
 +    self.dbName = dbName
 +    self.tableName = tableName
 +    self.table = table
 +    self.environmentContext = environmentContext
 +    self.txnId = txnId
 +    self.writeId = writeId
 +    self.validWriteIdList = validWriteIdList
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 1:
 +        if ftype == TType.STRING:
 +          self.catName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 2:
 +        if ftype == TType.STRING:
 +          self.dbName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 3:
 +        if ftype == TType.STRING:
 +          self.tableName = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 4:
 +        if ftype == TType.STRUCT:
 +          self.table = Table()
 +          self.table.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 5:
 +        if ftype == TType.STRUCT:
 +          self.environmentContext = EnvironmentContext()
 +          self.environmentContext.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 6:
 +        if ftype == TType.I64:
 +          self.txnId = iprot.readI64()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 7:
 +        if ftype == TType.I64:
 +          self.writeId = iprot.readI64()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 8:
 +        if ftype == TType.STRING:
 +          self.validWriteIdList = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('AlterTableRequest')
 +    if self.catName is not None:
 +      oprot.writeFieldBegin('catName', TType.STRING, 1)
 +      oprot.writeString(self.catName)
 +      oprot.writeFieldEnd()
 +    if self.dbName is not None:
 +      oprot.writeFieldBegin('dbName', TType.STRING, 2)
 +      oprot.writeString(self.dbName)
 +      oprot.writeFieldEnd()
 +    if self.tableName is not None:
 +      oprot.writeFieldBegin('tableName', TType.STRING, 3)
 +      oprot.writeString(self.tableName)
 +      oprot.writeFieldEnd()
 +    if self.table is not None:
 +      oprot.writeFieldBegin('table', TType.STRUCT, 4)
 +      self.table.write(oprot)
 +      oprot.writeFieldEnd()
 +    if self.environmentContext is not None:
 +      oprot.writeFieldBegin('environmentContext', TType.STRUCT, 5)
 +      self.environmentContext.write(oprot)
 +      oprot.writeFieldEnd()
 +    if self.txnId is not None:
 +      oprot.writeFieldBegin('txnId', TType.I64, 6)
 +      oprot.writeI64(self.txnId)
 +      oprot.writeFieldEnd()
 +    if self.writeId is not None:
 +      oprot.writeFieldBegin('writeId', TType.I64, 7)
 +      oprot.writeI64(self.writeId)
 +      oprot.writeFieldEnd()
 +    if self.validWriteIdList is not None:
 +      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
 +      oprot.writeString(self.validWriteIdList)
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    if self.dbName is None:
 +      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
 +    if self.tableName is None:
 +      raise TProtocol.TProtocolException(message='Required field tableName is unset!')
 +    if self.table is None:
 +      raise TProtocol.TProtocolException(message='Required field table is unset!')
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.catName)
 +    value = (value * 31) ^ hash(self.dbName)
 +    value = (value * 31) ^ hash(self.tableName)
 +    value = (value * 31) ^ hash(self.table)
 +    value = (value * 31) ^ hash(self.environmentContext)
 +    value = (value * 31) ^ hash(self.txnId)
 +    value = (value * 31) ^ hash(self.writeId)
 +    value = (value * 31) ^ hash(self.validWriteIdList)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class AlterTableResponse:
 +
 +  thrift_spec = (
 +  )
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('AlterTableResponse')
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    return value
  
    def __repr__(self):
      L = ['%s=%r' % (key, value)

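The structs added in the hunk above (AlterPartitionsRequest, AlterTableRequest) carry a txnId, writeId and validWriteIdList next to the altered objects, presumably so alter operations can be associated with a transactional write. A minimal sketch of populating the matching Thrift-generated Java bean follows; it assumes the usual Thrift Java setters and the org.apache.hadoop.hive.metastore.api package used for the other generated classes, neither of which appears in this hunk.

    // Sketch only: assumes the Thrift compiler emits the usual Java bean for
    // AlterTableRequest, with one setter per field declared in the struct above.
    import org.apache.hadoop.hive.metastore.api.AlterTableRequest;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class AlterTableRequestSketch {
      static AlterTableRequest buildRequest(Table newTable, long writeId, String validWriteIdList) {
        AlterTableRequest req = new AlterTableRequest();
        req.setDbName(newTable.getDbName());        // required field
        req.setTableName(newTable.getTableName());  // required field
        req.setTable(newTable);                     // required: the new table definition
        req.setWriteId(writeId);                    // thrift_spec default is -1 when unset
        req.setValidWriteIdList(validWriteIdList);  // writer's snapshot of valid write ids
        return req;
      }
    }
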
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 319e612,2bae133..92205ae
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@@ -197,69 -194,15 +197,77 @@@ public final class TxnDbUtil 
            " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
        );
  
+       stmt.execute("CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (" +
+           "  MRL_TXN_ID BIGINT NOT NULL, " +
+           "  MRL_DB_NAME VARCHAR(128) NOT NULL, " +
+           "  MRL_TBL_NAME VARCHAR(256) NOT NULL, " +
+           "  MRL_LAST_HEARTBEAT BIGINT NOT NULL, " +
+           "  PRIMARY KEY(MRL_TXN_ID))"
+       );
+ 
        try {
 +        stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
 +            " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
 +            " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
 +            " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
 +            " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
 +            " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " +
 +            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
 +            " PRIMARY KEY (TBL_ID))"
 +        );
 +      } catch (SQLException e) {
 +        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
 +          LOG.info("TBLS table already exist, ignoring");
 +        } else {
 +          throw e;
 +        }
 +      }
 +
 +      try {
 +        stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
 +            " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
 +            " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
 +            " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " +
 +            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
 +            " PRIMARY KEY (PART_ID))"
 +        );
 +      } catch (SQLException e) {
 +        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
 +          LOG.info("PARTITIONS table already exist, ignoring");
 +        } else {
 +          throw e;
 +        }
 +      }
 +
 +      try {
 +        stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
 +            " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
 +            " \"PARAM_VALUE\" CLOB, " +
 +            " PRIMARY KEY (TBL_ID, PARAM_KEY))"
 +        );
 +      } catch (SQLException e) {
 +        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
 +          LOG.info("TABLE_PARAMS table already exist, ignoring");
 +        } else {
 +          throw e;
 +        }
 +      }
 +
 +      try {
 +        stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
 +            " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
 +            " \"PARAM_VALUE\" VARCHAR(4000), " +
 +            " PRIMARY KEY (PART_ID, PARAM_KEY))"
 +        );
 +      } catch (SQLException e) {
 +        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
 +          LOG.info("PARTITION_PARAMS table already exist, ignoring");
 +        } else {
 +          throw e;
 +        }
 +      }
 +
 +      try {
          stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
  
                  "NULL, \"NEXT_VAL\" BIGINT NOT NULL)"

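The TxnDbUtil change above extends the Derby test schema with the new MATERIALIZATION_REBUILD_LOCKS table, plus minimal copies of TBLS, PARTITIONS, TABLE_PARAMS and PARTITION_PARAMS for tests that need them. As a rough illustration of how a stale rebuild lock could be expired, here is a JDBC sketch; this is an assumption for illustration only (the real logic lives in TxnHandler, whose diff is not included in this message), including the guess that MRL_LAST_HEARTBEAT stores a millisecond timestamp.

    // Hypothetical helper, not the TxnHandler implementation: drops rebuild-lock
    // rows whose heartbeat is older than the given timeout.
    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class RebuildLockCleanupSketch {
      static int expireStaleLocks(Connection conn, long nowMillis, long timeoutMillis)
          throws SQLException {
        String sql = "DELETE FROM MATERIALIZATION_REBUILD_LOCKS WHERE MRL_LAST_HEARTBEAT < ?";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
          ps.setLong(1, nowMillis - timeoutMillis);
          return ps.executeUpdate();  // number of stale rebuild locks removed
        }
      }
    }
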
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index df35f22,33f24fb..080cc52
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@@ -135,20 -138,25 +139,29 @@@ public interface TxnStore extends Confi
     * @throws MetaException
     */
    @RetrySemantics.Idempotent
-   BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit(
-       String inputDbName, String inputTableName, ValidWriteIdList txnList)
+   Materialization getMaterializationInvalidationInfo(
+       final CreationMetadata cm, final String validTxnList)
            throws MetaException;
  
 +  @RetrySemantics.ReadOnly
 +  long getTxnIdForWriteId(String dbName, String tblName, long writeId)
 +      throws MetaException;
 +
-   /**
-    * Gets the list of valid write ids for the given table wrt to current txn
-    * @param rqst info on transaction and list of table names associated with given transaction
-    * @throws NoSuchTxnException
-    * @throws MetaException
-    */
+   LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout)
+       throws MetaException;
+ 
+     /**
+      * Gets the list of valid write ids for the given table wrt to current txn
+      * @param rqst info on transaction and list of table names associated with given transaction
+      * @throws NoSuchTxnException
+      * @throws MetaException
+      */
    @RetrySemantics.ReadOnly
    GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
            throws NoSuchTxnException,  MetaException;
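
The additions above give TxnStore a lock / heartbeat / cleanup API for materialized view rebuilds, alongside the Materialization-returning invalidation check. A minimal sketch of how the lock and heartbeat calls fit together; the code that actually drives a rebuild lives on the ql side and is not part of this hunk, so the surrounding flow here is assumed.

    // Sketch using only the signatures shown in the TxnStore diff above; the
    // caller-side orchestration is an assumption for illustration.
    import org.apache.hadoop.hive.metastore.api.LockResponse;
    import org.apache.hadoop.hive.metastore.api.LockState;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.txn.TxnStore;

    public class RebuildLockFlowSketch {
      static boolean tryRebuild(TxnStore txnStore, String db, String mv, long txnId)
          throws MetaException {
        // Take the single rebuild slot for this materialized view.
        LockResponse resp = txnStore.lockMaterializationRebuild(db, mv, txnId);
        if (resp.getState() != LockState.ACQUIRED) {
          return false;  // another transaction is already rebuilding this view
        }
        // Keep the lock alive while the rebuild runs; false means the row was
        // already expired by the cleanup pass.
        return txnStore.heartbeatLockMaterializationRebuild(db, mv, txnId);
      }
    }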

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------


[21/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
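
This part of the series reshapes the Thrift call itself: get_materialization_invalidation_info now takes the materialized view's CreationMetadata plus the caller's transaction snapshot string and returns a single Materialization, instead of a (dbname, tbl_names) pair returning an array of results, as the PHP interface diff below shows. A hedged Java sketch of the new call shape; going through the raw generated client interface here, rather than IMetaStoreClient, is an assumption made for illustration.

    // Sketch of the new call shape only; error handling and how the
    // CreationMetadata is obtained are out of scope here.
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.Materialization;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    public class InvalidationInfoCallSketch {
      static Materialization check(ThriftHiveMetastore.Iface client,
          CreationMetadata cm, String validTxnList) throws TException {
        // cm records the view's source tables at creation/rebuild time;
        // validTxnList is the caller's current transaction snapshot.
        return client.get_materialization_invalidation_info(cm, validTxnList);
      }
    }
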
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index a83017b..29e787b 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -334,14 +334,14 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    */
   public function get_table_objects_by_name_req(\metastore\GetTablesRequest $req);
   /**
-   * @param string $dbname
-   * @param string[] $tbl_names
-   * @return array
+   * @param \metastore\CreationMetadata $creation_metadata
+   * @param string $validTxnList
+   * @return \metastore\Materialization
    * @throws \metastore\MetaException
    * @throws \metastore\InvalidOperationException
    * @throws \metastore\UnknownDBException
    */
-  public function get_materialization_invalidation_info($dbname, array $tbl_names);
+  public function get_materialization_invalidation_info(\metastore\CreationMetadata $creation_metadata, $validTxnList);
   /**
    * @param string $catName
    * @param string $dbname
@@ -3999,17 +3999,17 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas
     throw new \Exception("get_table_objects_by_name_req failed: unknown result");
   }
 
-  public function get_materialization_invalidation_info($dbname, array $tbl_names)
+  public function get_materialization_invalidation_info(\metastore\CreationMetadata $creation_metadata, $validTxnList)
   {
-    $this->send_get_materialization_invalidation_info($dbname, $tbl_names);
+    $this->send_get_materialization_invalidation_info($creation_metadata, $validTxnList);
     return $this->recv_get_materialization_invalidation_info();
   }
 
-  public function send_get_materialization_invalidation_info($dbname, array $tbl_names)
+  public function send_get_materialization_invalidation_info(\metastore\CreationMetadata $creation_metadata, $validTxnList)
   {
     $args = new \metastore\ThriftHiveMetastore_get_materialization_invalidation_info_args();
-    $args->dbname = $dbname;
-    $args->tbl_names = $tbl_names;
+    $args->creation_metadata = $creation_metadata;
+    $args->validTxnList = $validTxnList;
     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
     if ($bin_accel)
     {
@@ -15496,14 +15496,14 @@ class ThriftHiveMetastore_get_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size841 = 0;
-            $_etype844 = 0;
-            $xfer += $input->readListBegin($_etype844, $_size841);
-            for ($_i845 = 0; $_i845 < $_size841; ++$_i845)
+            $_size833 = 0;
+            $_etype836 = 0;
+            $xfer += $input->readListBegin($_etype836, $_size833);
+            for ($_i837 = 0; $_i837 < $_size833; ++$_i837)
             {
-              $elem846 = null;
-              $xfer += $input->readString($elem846);
-              $this->success []= $elem846;
+              $elem838 = null;
+              $xfer += $input->readString($elem838);
+              $this->success []= $elem838;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15539,9 +15539,9 @@ class ThriftHiveMetastore_get_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter847)
+          foreach ($this->success as $iter839)
           {
-            $xfer += $output->writeString($iter847);
+            $xfer += $output->writeString($iter839);
           }
         }
         $output->writeListEnd();
@@ -15672,14 +15672,14 @@ class ThriftHiveMetastore_get_all_databases_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size848 = 0;
-            $_etype851 = 0;
-            $xfer += $input->readListBegin($_etype851, $_size848);
-            for ($_i852 = 0; $_i852 < $_size848; ++$_i852)
+            $_size840 = 0;
+            $_etype843 = 0;
+            $xfer += $input->readListBegin($_etype843, $_size840);
+            for ($_i844 = 0; $_i844 < $_size840; ++$_i844)
             {
-              $elem853 = null;
-              $xfer += $input->readString($elem853);
-              $this->success []= $elem853;
+              $elem845 = null;
+              $xfer += $input->readString($elem845);
+              $this->success []= $elem845;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -15715,9 +15715,9 @@ class ThriftHiveMetastore_get_all_databases_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter854)
+          foreach ($this->success as $iter846)
           {
-            $xfer += $output->writeString($iter854);
+            $xfer += $output->writeString($iter846);
           }
         }
         $output->writeListEnd();
@@ -16718,18 +16718,18 @@ class ThriftHiveMetastore_get_type_all_result {
         case 0:
           if ($ftype == TType::MAP) {
             $this->success = array();
-            $_size855 = 0;
-            $_ktype856 = 0;
-            $_vtype857 = 0;
-            $xfer += $input->readMapBegin($_ktype856, $_vtype857, $_size855);
-            for ($_i859 = 0; $_i859 < $_size855; ++$_i859)
+            $_size847 = 0;
+            $_ktype848 = 0;
+            $_vtype849 = 0;
+            $xfer += $input->readMapBegin($_ktype848, $_vtype849, $_size847);
+            for ($_i851 = 0; $_i851 < $_size847; ++$_i851)
             {
-              $key860 = '';
-              $val861 = new \metastore\Type();
-              $xfer += $input->readString($key860);
-              $val861 = new \metastore\Type();
-              $xfer += $val861->read($input);
-              $this->success[$key860] = $val861;
+              $key852 = '';
+              $val853 = new \metastore\Type();
+              $xfer += $input->readString($key852);
+              $val853 = new \metastore\Type();
+              $xfer += $val853->read($input);
+              $this->success[$key852] = $val853;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -16765,10 +16765,10 @@ class ThriftHiveMetastore_get_type_all_result {
       {
         $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $kiter862 => $viter863)
+          foreach ($this->success as $kiter854 => $viter855)
           {
-            $xfer += $output->writeString($kiter862);
-            $xfer += $viter863->write($output);
+            $xfer += $output->writeString($kiter854);
+            $xfer += $viter855->write($output);
           }
         }
         $output->writeMapEnd();
@@ -16972,15 +16972,15 @@ class ThriftHiveMetastore_get_fields_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size864 = 0;
-            $_etype867 = 0;
-            $xfer += $input->readListBegin($_etype867, $_size864);
-            for ($_i868 = 0; $_i868 < $_size864; ++$_i868)
+            $_size856 = 0;
+            $_etype859 = 0;
+            $xfer += $input->readListBegin($_etype859, $_size856);
+            for ($_i860 = 0; $_i860 < $_size856; ++$_i860)
             {
-              $elem869 = null;
-              $elem869 = new \metastore\FieldSchema();
-              $xfer += $elem869->read($input);
-              $this->success []= $elem869;
+              $elem861 = null;
+              $elem861 = new \metastore\FieldSchema();
+              $xfer += $elem861->read($input);
+              $this->success []= $elem861;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17032,9 +17032,9 @@ class ThriftHiveMetastore_get_fields_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter870)
+          foreach ($this->success as $iter862)
           {
-            $xfer += $iter870->write($output);
+            $xfer += $iter862->write($output);
           }
         }
         $output->writeListEnd();
@@ -17276,15 +17276,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size871 = 0;
-            $_etype874 = 0;
-            $xfer += $input->readListBegin($_etype874, $_size871);
-            for ($_i875 = 0; $_i875 < $_size871; ++$_i875)
+            $_size863 = 0;
+            $_etype866 = 0;
+            $xfer += $input->readListBegin($_etype866, $_size863);
+            for ($_i867 = 0; $_i867 < $_size863; ++$_i867)
             {
-              $elem876 = null;
-              $elem876 = new \metastore\FieldSchema();
-              $xfer += $elem876->read($input);
-              $this->success []= $elem876;
+              $elem868 = null;
+              $elem868 = new \metastore\FieldSchema();
+              $xfer += $elem868->read($input);
+              $this->success []= $elem868;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17336,9 +17336,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter877)
+          foreach ($this->success as $iter869)
           {
-            $xfer += $iter877->write($output);
+            $xfer += $iter869->write($output);
           }
         }
         $output->writeListEnd();
@@ -17552,15 +17552,15 @@ class ThriftHiveMetastore_get_schema_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size878 = 0;
-            $_etype881 = 0;
-            $xfer += $input->readListBegin($_etype881, $_size878);
-            for ($_i882 = 0; $_i882 < $_size878; ++$_i882)
+            $_size870 = 0;
+            $_etype873 = 0;
+            $xfer += $input->readListBegin($_etype873, $_size870);
+            for ($_i874 = 0; $_i874 < $_size870; ++$_i874)
             {
-              $elem883 = null;
-              $elem883 = new \metastore\FieldSchema();
-              $xfer += $elem883->read($input);
-              $this->success []= $elem883;
+              $elem875 = null;
+              $elem875 = new \metastore\FieldSchema();
+              $xfer += $elem875->read($input);
+              $this->success []= $elem875;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17612,9 +17612,9 @@ class ThriftHiveMetastore_get_schema_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter884)
+          foreach ($this->success as $iter876)
           {
-            $xfer += $iter884->write($output);
+            $xfer += $iter876->write($output);
           }
         }
         $output->writeListEnd();
@@ -17856,15 +17856,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size885 = 0;
-            $_etype888 = 0;
-            $xfer += $input->readListBegin($_etype888, $_size885);
-            for ($_i889 = 0; $_i889 < $_size885; ++$_i889)
+            $_size877 = 0;
+            $_etype880 = 0;
+            $xfer += $input->readListBegin($_etype880, $_size877);
+            for ($_i881 = 0; $_i881 < $_size877; ++$_i881)
             {
-              $elem890 = null;
-              $elem890 = new \metastore\FieldSchema();
-              $xfer += $elem890->read($input);
-              $this->success []= $elem890;
+              $elem882 = null;
+              $elem882 = new \metastore\FieldSchema();
+              $xfer += $elem882->read($input);
+              $this->success []= $elem882;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -17916,9 +17916,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter891)
+          foreach ($this->success as $iter883)
           {
-            $xfer += $iter891->write($output);
+            $xfer += $iter883->write($output);
           }
         }
         $output->writeListEnd();
@@ -18590,15 +18590,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->primaryKeys = array();
-            $_size892 = 0;
-            $_etype895 = 0;
-            $xfer += $input->readListBegin($_etype895, $_size892);
-            for ($_i896 = 0; $_i896 < $_size892; ++$_i896)
+            $_size884 = 0;
+            $_etype887 = 0;
+            $xfer += $input->readListBegin($_etype887, $_size884);
+            for ($_i888 = 0; $_i888 < $_size884; ++$_i888)
             {
-              $elem897 = null;
-              $elem897 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem897->read($input);
-              $this->primaryKeys []= $elem897;
+              $elem889 = null;
+              $elem889 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem889->read($input);
+              $this->primaryKeys []= $elem889;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18608,15 +18608,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->foreignKeys = array();
-            $_size898 = 0;
-            $_etype901 = 0;
-            $xfer += $input->readListBegin($_etype901, $_size898);
-            for ($_i902 = 0; $_i902 < $_size898; ++$_i902)
+            $_size890 = 0;
+            $_etype893 = 0;
+            $xfer += $input->readListBegin($_etype893, $_size890);
+            for ($_i894 = 0; $_i894 < $_size890; ++$_i894)
             {
-              $elem903 = null;
-              $elem903 = new \metastore\SQLForeignKey();
-              $xfer += $elem903->read($input);
-              $this->foreignKeys []= $elem903;
+              $elem895 = null;
+              $elem895 = new \metastore\SQLForeignKey();
+              $xfer += $elem895->read($input);
+              $this->foreignKeys []= $elem895;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18626,15 +18626,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 4:
           if ($ftype == TType::LST) {
             $this->uniqueConstraints = array();
-            $_size904 = 0;
-            $_etype907 = 0;
-            $xfer += $input->readListBegin($_etype907, $_size904);
-            for ($_i908 = 0; $_i908 < $_size904; ++$_i908)
+            $_size896 = 0;
+            $_etype899 = 0;
+            $xfer += $input->readListBegin($_etype899, $_size896);
+            for ($_i900 = 0; $_i900 < $_size896; ++$_i900)
             {
-              $elem909 = null;
-              $elem909 = new \metastore\SQLUniqueConstraint();
-              $xfer += $elem909->read($input);
-              $this->uniqueConstraints []= $elem909;
+              $elem901 = null;
+              $elem901 = new \metastore\SQLUniqueConstraint();
+              $xfer += $elem901->read($input);
+              $this->uniqueConstraints []= $elem901;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18644,15 +18644,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->notNullConstraints = array();
-            $_size910 = 0;
-            $_etype913 = 0;
-            $xfer += $input->readListBegin($_etype913, $_size910);
-            for ($_i914 = 0; $_i914 < $_size910; ++$_i914)
+            $_size902 = 0;
+            $_etype905 = 0;
+            $xfer += $input->readListBegin($_etype905, $_size902);
+            for ($_i906 = 0; $_i906 < $_size902; ++$_i906)
             {
-              $elem915 = null;
-              $elem915 = new \metastore\SQLNotNullConstraint();
-              $xfer += $elem915->read($input);
-              $this->notNullConstraints []= $elem915;
+              $elem907 = null;
+              $elem907 = new \metastore\SQLNotNullConstraint();
+              $xfer += $elem907->read($input);
+              $this->notNullConstraints []= $elem907;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18662,15 +18662,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->defaultConstraints = array();
-            $_size916 = 0;
-            $_etype919 = 0;
-            $xfer += $input->readListBegin($_etype919, $_size916);
-            for ($_i920 = 0; $_i920 < $_size916; ++$_i920)
+            $_size908 = 0;
+            $_etype911 = 0;
+            $xfer += $input->readListBegin($_etype911, $_size908);
+            for ($_i912 = 0; $_i912 < $_size908; ++$_i912)
             {
-              $elem921 = null;
-              $elem921 = new \metastore\SQLDefaultConstraint();
-              $xfer += $elem921->read($input);
-              $this->defaultConstraints []= $elem921;
+              $elem913 = null;
+              $elem913 = new \metastore\SQLDefaultConstraint();
+              $xfer += $elem913->read($input);
+              $this->defaultConstraints []= $elem913;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18680,15 +18680,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
         case 7:
           if ($ftype == TType::LST) {
             $this->checkConstraints = array();
-            $_size922 = 0;
-            $_etype925 = 0;
-            $xfer += $input->readListBegin($_etype925, $_size922);
-            for ($_i926 = 0; $_i926 < $_size922; ++$_i926)
+            $_size914 = 0;
+            $_etype917 = 0;
+            $xfer += $input->readListBegin($_etype917, $_size914);
+            for ($_i918 = 0; $_i918 < $_size914; ++$_i918)
             {
-              $elem927 = null;
-              $elem927 = new \metastore\SQLCheckConstraint();
-              $xfer += $elem927->read($input);
-              $this->checkConstraints []= $elem927;
+              $elem919 = null;
+              $elem919 = new \metastore\SQLCheckConstraint();
+              $xfer += $elem919->read($input);
+              $this->checkConstraints []= $elem919;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18724,9 +18724,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
         {
-          foreach ($this->primaryKeys as $iter928)
+          foreach ($this->primaryKeys as $iter920)
           {
-            $xfer += $iter928->write($output);
+            $xfer += $iter920->write($output);
           }
         }
         $output->writeListEnd();
@@ -18741,9 +18741,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
         {
-          foreach ($this->foreignKeys as $iter929)
+          foreach ($this->foreignKeys as $iter921)
           {
-            $xfer += $iter929->write($output);
+            $xfer += $iter921->write($output);
           }
         }
         $output->writeListEnd();
@@ -18758,9 +18758,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints));
         {
-          foreach ($this->uniqueConstraints as $iter930)
+          foreach ($this->uniqueConstraints as $iter922)
           {
-            $xfer += $iter930->write($output);
+            $xfer += $iter922->write($output);
           }
         }
         $output->writeListEnd();
@@ -18775,9 +18775,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints));
         {
-          foreach ($this->notNullConstraints as $iter931)
+          foreach ($this->notNullConstraints as $iter923)
           {
-            $xfer += $iter931->write($output);
+            $xfer += $iter923->write($output);
           }
         }
         $output->writeListEnd();
@@ -18792,9 +18792,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints));
         {
-          foreach ($this->defaultConstraints as $iter932)
+          foreach ($this->defaultConstraints as $iter924)
           {
-            $xfer += $iter932->write($output);
+            $xfer += $iter924->write($output);
           }
         }
         $output->writeListEnd();
@@ -18809,9 +18809,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->checkConstraints));
         {
-          foreach ($this->checkConstraints as $iter933)
+          foreach ($this->checkConstraints as $iter925)
           {
-            $xfer += $iter933->write($output);
+            $xfer += $iter925->write($output);
           }
         }
         $output->writeListEnd();
@@ -20811,14 +20811,14 @@ class ThriftHiveMetastore_truncate_table_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->partNames = array();
-            $_size934 = 0;
-            $_etype937 = 0;
-            $xfer += $input->readListBegin($_etype937, $_size934);
-            for ($_i938 = 0; $_i938 < $_size934; ++$_i938)
+            $_size926 = 0;
+            $_etype929 = 0;
+            $xfer += $input->readListBegin($_etype929, $_size926);
+            for ($_i930 = 0; $_i930 < $_size926; ++$_i930)
             {
-              $elem939 = null;
-              $xfer += $input->readString($elem939);
-              $this->partNames []= $elem939;
+              $elem931 = null;
+              $xfer += $input->readString($elem931);
+              $this->partNames []= $elem931;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -20856,9 +20856,9 @@ class ThriftHiveMetastore_truncate_table_args {
       {
         $output->writeListBegin(TType::STRING, count($this->partNames));
         {
-          foreach ($this->partNames as $iter940)
+          foreach ($this->partNames as $iter932)
           {
-            $xfer += $output->writeString($iter940);
+            $xfer += $output->writeString($iter932);
           }
         }
         $output->writeListEnd();
@@ -21109,14 +21109,14 @@ class ThriftHiveMetastore_get_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size941 = 0;
-            $_etype944 = 0;
-            $xfer += $input->readListBegin($_etype944, $_size941);
-            for ($_i945 = 0; $_i945 < $_size941; ++$_i945)
+            $_size933 = 0;
+            $_etype936 = 0;
+            $xfer += $input->readListBegin($_etype936, $_size933);
+            for ($_i937 = 0; $_i937 < $_size933; ++$_i937)
             {
-              $elem946 = null;
-              $xfer += $input->readString($elem946);
-              $this->success []= $elem946;
+              $elem938 = null;
+              $xfer += $input->readString($elem938);
+              $this->success []= $elem938;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21152,9 +21152,9 @@ class ThriftHiveMetastore_get_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter947)
+          foreach ($this->success as $iter939)
           {
-            $xfer += $output->writeString($iter947);
+            $xfer += $output->writeString($iter939);
           }
         }
         $output->writeListEnd();
@@ -21356,14 +21356,14 @@ class ThriftHiveMetastore_get_tables_by_type_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size948 = 0;
-            $_etype951 = 0;
-            $xfer += $input->readListBegin($_etype951, $_size948);
-            for ($_i952 = 0; $_i952 < $_size948; ++$_i952)
+            $_size940 = 0;
+            $_etype943 = 0;
+            $xfer += $input->readListBegin($_etype943, $_size940);
+            for ($_i944 = 0; $_i944 < $_size940; ++$_i944)
             {
-              $elem953 = null;
-              $xfer += $input->readString($elem953);
-              $this->success []= $elem953;
+              $elem945 = null;
+              $xfer += $input->readString($elem945);
+              $this->success []= $elem945;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21399,9 +21399,9 @@ class ThriftHiveMetastore_get_tables_by_type_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter954)
+          foreach ($this->success as $iter946)
           {
-            $xfer += $output->writeString($iter954);
+            $xfer += $output->writeString($iter946);
           }
         }
         $output->writeListEnd();
@@ -21557,14 +21557,14 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size955 = 0;
-            $_etype958 = 0;
-            $xfer += $input->readListBegin($_etype958, $_size955);
-            for ($_i959 = 0; $_i959 < $_size955; ++$_i959)
+            $_size947 = 0;
+            $_etype950 = 0;
+            $xfer += $input->readListBegin($_etype950, $_size947);
+            for ($_i951 = 0; $_i951 < $_size947; ++$_i951)
             {
-              $elem960 = null;
-              $xfer += $input->readString($elem960);
-              $this->success []= $elem960;
+              $elem952 = null;
+              $xfer += $input->readString($elem952);
+              $this->success []= $elem952;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21600,9 +21600,9 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter961)
+          foreach ($this->success as $iter953)
           {
-            $xfer += $output->writeString($iter961);
+            $xfer += $output->writeString($iter953);
           }
         }
         $output->writeListEnd();
@@ -21707,14 +21707,14 @@ class ThriftHiveMetastore_get_table_meta_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->tbl_types = array();
-            $_size962 = 0;
-            $_etype965 = 0;
-            $xfer += $input->readListBegin($_etype965, $_size962);
-            for ($_i966 = 0; $_i966 < $_size962; ++$_i966)
+            $_size954 = 0;
+            $_etype957 = 0;
+            $xfer += $input->readListBegin($_etype957, $_size954);
+            for ($_i958 = 0; $_i958 < $_size954; ++$_i958)
             {
-              $elem967 = null;
-              $xfer += $input->readString($elem967);
-              $this->tbl_types []= $elem967;
+              $elem959 = null;
+              $xfer += $input->readString($elem959);
+              $this->tbl_types []= $elem959;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21752,9 +21752,9 @@ class ThriftHiveMetastore_get_table_meta_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_types));
         {
-          foreach ($this->tbl_types as $iter968)
+          foreach ($this->tbl_types as $iter960)
           {
-            $xfer += $output->writeString($iter968);
+            $xfer += $output->writeString($iter960);
           }
         }
         $output->writeListEnd();
@@ -21831,15 +21831,15 @@ class ThriftHiveMetastore_get_table_meta_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size969 = 0;
-            $_etype972 = 0;
-            $xfer += $input->readListBegin($_etype972, $_size969);
-            for ($_i973 = 0; $_i973 < $_size969; ++$_i973)
+            $_size961 = 0;
+            $_etype964 = 0;
+            $xfer += $input->readListBegin($_etype964, $_size961);
+            for ($_i965 = 0; $_i965 < $_size961; ++$_i965)
             {
-              $elem974 = null;
-              $elem974 = new \metastore\TableMeta();
-              $xfer += $elem974->read($input);
-              $this->success []= $elem974;
+              $elem966 = null;
+              $elem966 = new \metastore\TableMeta();
+              $xfer += $elem966->read($input);
+              $this->success []= $elem966;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -21875,9 +21875,9 @@ class ThriftHiveMetastore_get_table_meta_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter975)
+          foreach ($this->success as $iter967)
           {
-            $xfer += $iter975->write($output);
+            $xfer += $iter967->write($output);
           }
         }
         $output->writeListEnd();
@@ -22033,14 +22033,14 @@ class ThriftHiveMetastore_get_all_tables_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size976 = 0;
-            $_etype979 = 0;
-            $xfer += $input->readListBegin($_etype979, $_size976);
-            for ($_i980 = 0; $_i980 < $_size976; ++$_i980)
+            $_size968 = 0;
+            $_etype971 = 0;
+            $xfer += $input->readListBegin($_etype971, $_size968);
+            for ($_i972 = 0; $_i972 < $_size968; ++$_i972)
             {
-              $elem981 = null;
-              $xfer += $input->readString($elem981);
-              $this->success []= $elem981;
+              $elem973 = null;
+              $xfer += $input->readString($elem973);
+              $this->success []= $elem973;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22076,9 +22076,9 @@ class ThriftHiveMetastore_get_all_tables_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter982)
+          foreach ($this->success as $iter974)
           {
-            $xfer += $output->writeString($iter982);
+            $xfer += $output->writeString($iter974);
           }
         }
         $output->writeListEnd();
@@ -22393,14 +22393,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
         case 2:
           if ($ftype == TType::LST) {
             $this->tbl_names = array();
-            $_size983 = 0;
-            $_etype986 = 0;
-            $xfer += $input->readListBegin($_etype986, $_size983);
-            for ($_i987 = 0; $_i987 < $_size983; ++$_i987)
+            $_size975 = 0;
+            $_etype978 = 0;
+            $xfer += $input->readListBegin($_etype978, $_size975);
+            for ($_i979 = 0; $_i979 < $_size975; ++$_i979)
             {
-              $elem988 = null;
-              $xfer += $input->readString($elem988);
-              $this->tbl_names []= $elem988;
+              $elem980 = null;
+              $xfer += $input->readString($elem980);
+              $this->tbl_names []= $elem980;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22433,9 +22433,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args {
       {
         $output->writeListBegin(TType::STRING, count($this->tbl_names));
         {
-          foreach ($this->tbl_names as $iter989)
+          foreach ($this->tbl_names as $iter981)
           {
-            $xfer += $output->writeString($iter989);
+            $xfer += $output->writeString($iter981);
           }
         }
         $output->writeListEnd();
@@ -22500,15 +22500,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size990 = 0;
-            $_etype993 = 0;
-            $xfer += $input->readListBegin($_etype993, $_size990);
-            for ($_i994 = 0; $_i994 < $_size990; ++$_i994)
+            $_size982 = 0;
+            $_etype985 = 0;
+            $xfer += $input->readListBegin($_etype985, $_size982);
+            for ($_i986 = 0; $_i986 < $_size982; ++$_i986)
             {
-              $elem995 = null;
-              $elem995 = new \metastore\Table();
-              $xfer += $elem995->read($input);
-              $this->success []= $elem995;
+              $elem987 = null;
+              $elem987 = new \metastore\Table();
+              $xfer += $elem987->read($input);
+              $this->success []= $elem987;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -22536,9 +22536,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter996)
+          foreach ($this->success as $iter988)
           {
-            $xfer += $iter996->write($output);
+            $xfer += $iter988->write($output);
           }
         }
         $output->writeListEnd();
@@ -23001,37 +23001,34 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
   static $_TSPEC;
 
   /**
-   * @var string
+   * @var \metastore\CreationMetadata
    */
-  public $dbname = null;
+  public $creation_metadata = null;
   /**
-   * @var string[]
+   * @var string
    */
-  public $tbl_names = null;
+  public $validTxnList = null;
 
   public function __construct($vals=null) {
     if (!isset(self::$_TSPEC)) {
       self::$_TSPEC = array(
         1 => array(
-          'var' => 'dbname',
-          'type' => TType::STRING,
+          'var' => 'creation_metadata',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\CreationMetadata',
           ),
         2 => array(
-          'var' => 'tbl_names',
-          'type' => TType::LST,
-          'etype' => TType::STRING,
-          'elem' => array(
-            'type' => TType::STRING,
-            ),
+          'var' => 'validTxnList',
+          'type' => TType::STRING,
           ),
         );
     }
     if (is_array($vals)) {
-      if (isset($vals['dbname'])) {
-        $this->dbname = $vals['dbname'];
+      if (isset($vals['creation_metadata'])) {
+        $this->creation_metadata = $vals['creation_metadata'];
       }
-      if (isset($vals['tbl_names'])) {
-        $this->tbl_names = $vals['tbl_names'];
+      if (isset($vals['validTxnList'])) {
+        $this->validTxnList = $vals['validTxnList'];
       }
     }
   }
@@ -23056,25 +23053,16 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
       switch ($fid)
       {
         case 1:
-          if ($ftype == TType::STRING) {
-            $xfer += $input->readString($this->dbname);
+          if ($ftype == TType::STRUCT) {
+            $this->creation_metadata = new \metastore\CreationMetadata();
+            $xfer += $this->creation_metadata->read($input);
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
         case 2:
-          if ($ftype == TType::LST) {
-            $this->tbl_names = array();
-            $_size997 = 0;
-            $_etype1000 = 0;
-            $xfer += $input->readListBegin($_etype1000, $_size997);
-            for ($_i1001 = 0; $_i1001 < $_size997; ++$_i1001)
-            {
-              $elem1002 = null;
-              $xfer += $input->readString($elem1002);
-              $this->tbl_names []= $elem1002;
-            }
-            $xfer += $input->readListEnd();
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->validTxnList);
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -23092,26 +23080,17 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args {
   public function write($output) {
     $xfer = 0;
     $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialization_invalidation_info_args');
-    if ($this->dbname !== null) {
-      $xfer += $output->writeFieldBegin('dbname', TType::STRING, 1);
-      $xfer += $output->writeString($this->dbname);
-      $xfer += $output->writeFieldEnd();
-    }
-    if ($this->tbl_names !== null) {
-      if (!is_array($this->tbl_names)) {
+    if ($this->creation_metadata !== null) {
+      if (!is_object($this->creation_metadata)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('tbl_names', TType::LST, 2);
-      {
-        $output->writeListBegin(TType::STRING, count($this->tbl_names));
-        {
-          foreach ($this->tbl_names as $iter1003)
-          {
-            $xfer += $output->writeString($iter1003);
-          }
-        }
-        $output->writeListEnd();
-      }
+      $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 1);
+      $xfer += $this->creation_metadata->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->validTxnList !== null) {
+      $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 2);
+      $xfer += $output->writeString($this->validTxnList);
       $xfer += $output->writeFieldEnd();
     }
     $xfer += $output->writeFieldStop();
@@ -23125,7 +23104,7 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
   static $_TSPEC;
 
   /**
-   * @var array
+   * @var \metastore\Materialization
    */
   public $success = null;
   /**
@@ -23146,16 +23125,8 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
       self::$_TSPEC = array(
         0 => array(
           'var' => 'success',
-          'type' => TType::MAP,
-          'ktype' => TType::STRING,
-          'vtype' => TType::STRUCT,
-          'key' => array(
-            'type' => TType::STRING,
-          ),
-          'val' => array(
-            'type' => TType::STRUCT,
-            'class' => '\metastore\Materialization',
-            ),
+          'type' => TType::STRUCT,
+          'class' => '\metastore\Materialization',
           ),
         1 => array(
           'var' => 'o1',
@@ -23210,22 +23181,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
       switch ($fid)
       {
         case 0:
-          if ($ftype == TType::MAP) {
-            $this->success = array();
-            $_size1004 = 0;
-            $_ktype1005 = 0;
-            $_vtype1006 = 0;
-            $xfer += $input->readMapBegin($_ktype1005, $_vtype1006, $_size1004);
-            for ($_i1008 = 0; $_i1008 < $_size1004; ++$_i1008)
-            {
-              $key1009 = '';
-              $val1010 = new \metastore\Materialization();
-              $xfer += $input->readString($key1009);
-              $val1010 = new \metastore\Materialization();
-              $xfer += $val1010->read($input);
-              $this->success[$key1009] = $val1010;
-            }
-            $xfer += $input->readMapEnd();
+          if ($ftype == TType::STRUCT) {
+            $this->success = new \metastore\Materialization();
+            $xfer += $this->success->read($input);
           } else {
             $xfer += $input->skip($ftype);
           }
@@ -23268,21 +23226,11 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result {
     $xfer = 0;
     $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialization_invalidation_info_result');
     if ($this->success !== null) {
-      if (!is_array($this->success)) {
+      if (!is_object($this->success)) {
         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
       }
-      $xfer += $output->writeFieldBegin('success', TType::MAP, 0);
-      {
-        $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
-        {
-          foreach ($this->success as $kiter1011 => $viter1012)
-          {
-            $xfer += $output->writeString($kiter1011);
-            $xfer += $viter1012->write($output);
-          }
-        }
-        $output->writeMapEnd();
-      }
+      $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+      $xfer += $this->success->write($output);
       $xfer += $output->writeFieldEnd();
     }
     if ($this->o1 !== null) {
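[Note added for context, not part of the commit] The hunks above regenerate the PHP stubs for get_materialization_invalidation_info: the call now takes a CreationMetadata struct plus a validTxnList string instead of a database name and a list of table names, and it returns a single Materialization instead of a map keyed by table name. A minimal sketch of a client call against the new signature follows; it assumes an already-connected ThriftHiveMetastoreClient ($client), a transaction snapshot string obtained elsewhere, and the usual CreationMetadata field names (dbName, tblName, tablesUsed), none of which are shown in this hunk.

// Sketch only: $client is assumed to be a connected \metastore\ThriftHiveMetastoreClient.
// Generated Thrift PHP constructors accept an associative array of field values.
$cm = new \metastore\CreationMetadata(array(
  'dbName'     => 'default',
  'tblName'    => 'mv_sales_agg',          // hypothetical materialized view name
  'tablesUsed' => array('default.sales'),  // source tables recorded at creation time
));
$validTxnList = $currentTxnSnapshot;        // caller-supplied valid transaction list string
$materialization = $client->get_materialization_invalidation_info($cm, $validTxnList);
// $materialization is a \metastore\Materialization describing whether the view is outdated.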
@@ -23790,14 +23738,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1013 = 0;
-            $_etype1016 = 0;
-            $xfer += $input->readListBegin($_etype1016, $_size1013);
-            for ($_i1017 = 0; $_i1017 < $_size1013; ++$_i1017)
+            $_size989 = 0;
+            $_etype992 = 0;
+            $xfer += $input->readListBegin($_etype992, $_size989);
+            for ($_i993 = 0; $_i993 < $_size989; ++$_i993)
             {
-              $elem1018 = null;
-              $xfer += $input->readString($elem1018);
-              $this->success []= $elem1018;
+              $elem994 = null;
+              $xfer += $input->readString($elem994);
+              $this->success []= $elem994;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -23849,9 +23797,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1019)
+          foreach ($this->success as $iter995)
           {
-            $xfer += $output->writeString($iter1019);
+            $xfer += $output->writeString($iter995);
           }
         }
         $output->writeListEnd();
@@ -25164,15 +25112,15 @@ class ThriftHiveMetastore_add_partitions_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size1020 = 0;
-            $_etype1023 = 0;
-            $xfer += $input->readListBegin($_etype1023, $_size1020);
-            for ($_i1024 = 0; $_i1024 < $_size1020; ++$_i1024)
+            $_size996 = 0;
+            $_etype999 = 0;
+            $xfer += $input->readListBegin($_etype999, $_size996);
+            for ($_i1000 = 0; $_i1000 < $_size996; ++$_i1000)
             {
-              $elem1025 = null;
-              $elem1025 = new \metastore\Partition();
-              $xfer += $elem1025->read($input);
-              $this->new_parts []= $elem1025;
+              $elem1001 = null;
+              $elem1001 = new \metastore\Partition();
+              $xfer += $elem1001->read($input);
+              $this->new_parts []= $elem1001;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25200,9 +25148,9 @@ class ThriftHiveMetastore_add_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter1026)
+          foreach ($this->new_parts as $iter1002)
           {
-            $xfer += $iter1026->write($output);
+            $xfer += $iter1002->write($output);
           }
         }
         $output->writeListEnd();
@@ -25417,15 +25365,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size1027 = 0;
-            $_etype1030 = 0;
-            $xfer += $input->readListBegin($_etype1030, $_size1027);
-            for ($_i1031 = 0; $_i1031 < $_size1027; ++$_i1031)
+            $_size1003 = 0;
+            $_etype1006 = 0;
+            $xfer += $input->readListBegin($_etype1006, $_size1003);
+            for ($_i1007 = 0; $_i1007 < $_size1003; ++$_i1007)
             {
-              $elem1032 = null;
-              $elem1032 = new \metastore\PartitionSpec();
-              $xfer += $elem1032->read($input);
-              $this->new_parts []= $elem1032;
+              $elem1008 = null;
+              $elem1008 = new \metastore\PartitionSpec();
+              $xfer += $elem1008->read($input);
+              $this->new_parts []= $elem1008;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25453,9 +25401,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter1033)
+          foreach ($this->new_parts as $iter1009)
           {
-            $xfer += $iter1033->write($output);
+            $xfer += $iter1009->write($output);
           }
         }
         $output->writeListEnd();
@@ -25705,14 +25653,14 @@ class ThriftHiveMetastore_append_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1034 = 0;
-            $_etype1037 = 0;
-            $xfer += $input->readListBegin($_etype1037, $_size1034);
-            for ($_i1038 = 0; $_i1038 < $_size1034; ++$_i1038)
+            $_size1010 = 0;
+            $_etype1013 = 0;
+            $xfer += $input->readListBegin($_etype1013, $_size1010);
+            for ($_i1014 = 0; $_i1014 < $_size1010; ++$_i1014)
             {
-              $elem1039 = null;
-              $xfer += $input->readString($elem1039);
-              $this->part_vals []= $elem1039;
+              $elem1015 = null;
+              $xfer += $input->readString($elem1015);
+              $this->part_vals []= $elem1015;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -25750,9 +25698,9 @@ class ThriftHiveMetastore_append_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1040)
+          foreach ($this->part_vals as $iter1016)
           {
-            $xfer += $output->writeString($iter1040);
+            $xfer += $output->writeString($iter1016);
           }
         }
         $output->writeListEnd();
@@ -26254,14 +26202,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1041 = 0;
-            $_etype1044 = 0;
-            $xfer += $input->readListBegin($_etype1044, $_size1041);
-            for ($_i1045 = 0; $_i1045 < $_size1041; ++$_i1045)
+            $_size1017 = 0;
+            $_etype1020 = 0;
+            $xfer += $input->readListBegin($_etype1020, $_size1017);
+            for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021)
             {
-              $elem1046 = null;
-              $xfer += $input->readString($elem1046);
-              $this->part_vals []= $elem1046;
+              $elem1022 = null;
+              $xfer += $input->readString($elem1022);
+              $this->part_vals []= $elem1022;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -26307,9 +26255,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1047)
+          foreach ($this->part_vals as $iter1023)
           {
-            $xfer += $output->writeString($iter1047);
+            $xfer += $output->writeString($iter1023);
           }
         }
         $output->writeListEnd();
@@ -27163,14 +27111,14 @@ class ThriftHiveMetastore_drop_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1048 = 0;
-            $_etype1051 = 0;
-            $xfer += $input->readListBegin($_etype1051, $_size1048);
-            for ($_i1052 = 0; $_i1052 < $_size1048; ++$_i1052)
+            $_size1024 = 0;
+            $_etype1027 = 0;
+            $xfer += $input->readListBegin($_etype1027, $_size1024);
+            for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028)
             {
-              $elem1053 = null;
-              $xfer += $input->readString($elem1053);
-              $this->part_vals []= $elem1053;
+              $elem1029 = null;
+              $xfer += $input->readString($elem1029);
+              $this->part_vals []= $elem1029;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27215,9 +27163,9 @@ class ThriftHiveMetastore_drop_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1054)
+          foreach ($this->part_vals as $iter1030)
           {
-            $xfer += $output->writeString($iter1054);
+            $xfer += $output->writeString($iter1030);
           }
         }
         $output->writeListEnd();
@@ -27470,14 +27418,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1055 = 0;
-            $_etype1058 = 0;
-            $xfer += $input->readListBegin($_etype1058, $_size1055);
-            for ($_i1059 = 0; $_i1059 < $_size1055; ++$_i1059)
+            $_size1031 = 0;
+            $_etype1034 = 0;
+            $xfer += $input->readListBegin($_etype1034, $_size1031);
+            for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035)
             {
-              $elem1060 = null;
-              $xfer += $input->readString($elem1060);
-              $this->part_vals []= $elem1060;
+              $elem1036 = null;
+              $xfer += $input->readString($elem1036);
+              $this->part_vals []= $elem1036;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -27530,9 +27478,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1061)
+          foreach ($this->part_vals as $iter1037)
           {
-            $xfer += $output->writeString($iter1061);
+            $xfer += $output->writeString($iter1037);
           }
         }
         $output->writeListEnd();
@@ -28546,14 +28494,14 @@ class ThriftHiveMetastore_get_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1062 = 0;
-            $_etype1065 = 0;
-            $xfer += $input->readListBegin($_etype1065, $_size1062);
-            for ($_i1066 = 0; $_i1066 < $_size1062; ++$_i1066)
+            $_size1038 = 0;
+            $_etype1041 = 0;
+            $xfer += $input->readListBegin($_etype1041, $_size1038);
+            for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042)
             {
-              $elem1067 = null;
-              $xfer += $input->readString($elem1067);
-              $this->part_vals []= $elem1067;
+              $elem1043 = null;
+              $xfer += $input->readString($elem1043);
+              $this->part_vals []= $elem1043;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -28591,9 +28539,9 @@ class ThriftHiveMetastore_get_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1068)
+          foreach ($this->part_vals as $iter1044)
           {
-            $xfer += $output->writeString($iter1068);
+            $xfer += $output->writeString($iter1044);
           }
         }
         $output->writeListEnd();
@@ -28835,17 +28783,17 @@ class ThriftHiveMetastore_exchange_partition_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size1069 = 0;
-            $_ktype1070 = 0;
-            $_vtype1071 = 0;
-            $xfer += $input->readMapBegin($_ktype1070, $_vtype1071, $_size1069);
-            for ($_i1073 = 0; $_i1073 < $_size1069; ++$_i1073)
+            $_size1045 = 0;
+            $_ktype1046 = 0;
+            $_vtype1047 = 0;
+            $xfer += $input->readMapBegin($_ktype1046, $_vtype1047, $_size1045);
+            for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049)
             {
-              $key1074 = '';
-              $val1075 = '';
-              $xfer += $input->readString($key1074);
-              $xfer += $input->readString($val1075);
-              $this->partitionSpecs[$key1074] = $val1075;
+              $key1050 = '';
+              $val1051 = '';
+              $xfer += $input->readString($key1050);
+              $xfer += $input->readString($val1051);
+              $this->partitionSpecs[$key1050] = $val1051;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -28901,10 +28849,10 @@ class ThriftHiveMetastore_exchange_partition_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter1076 => $viter1077)
+          foreach ($this->partitionSpecs as $kiter1052 => $viter1053)
           {
-            $xfer += $output->writeString($kiter1076);
-            $xfer += $output->writeString($viter1077);
+            $xfer += $output->writeString($kiter1052);
+            $xfer += $output->writeString($viter1053);
           }
         }
         $output->writeMapEnd();
@@ -29216,17 +29164,17 @@ class ThriftHiveMetastore_exchange_partitions_args {
         case 1:
           if ($ftype == TType::MAP) {
             $this->partitionSpecs = array();
-            $_size1078 = 0;
-            $_ktype1079 = 0;
-            $_vtype1080 = 0;
-            $xfer += $input->readMapBegin($_ktype1079, $_vtype1080, $_size1078);
-            for ($_i1082 = 0; $_i1082 < $_size1078; ++$_i1082)
+            $_size1054 = 0;
+            $_ktype1055 = 0;
+            $_vtype1056 = 0;
+            $xfer += $input->readMapBegin($_ktype1055, $_vtype1056, $_size1054);
+            for ($_i1058 = 0; $_i1058 < $_size1054; ++$_i1058)
             {
-              $key1083 = '';
-              $val1084 = '';
-              $xfer += $input->readString($key1083);
-              $xfer += $input->readString($val1084);
-              $this->partitionSpecs[$key1083] = $val1084;
+              $key1059 = '';
+              $val1060 = '';
+              $xfer += $input->readString($key1059);
+              $xfer += $input->readString($val1060);
+              $this->partitionSpecs[$key1059] = $val1060;
             }
             $xfer += $input->readMapEnd();
           } else {
@@ -29282,10 +29230,10 @@ class ThriftHiveMetastore_exchange_partitions_args {
       {
         $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs));
         {
-          foreach ($this->partitionSpecs as $kiter1085 => $viter1086)
+          foreach ($this->partitionSpecs as $kiter1061 => $viter1062)
           {
-            $xfer += $output->writeString($kiter1085);
-            $xfer += $output->writeString($viter1086);
+            $xfer += $output->writeString($kiter1061);
+            $xfer += $output->writeString($viter1062);
           }
         }
         $output->writeMapEnd();
@@ -29418,15 +29366,15 @@ class ThriftHiveMetastore_exchange_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1087 = 0;
-            $_etype1090 = 0;
-            $xfer += $input->readListBegin($_etype1090, $_size1087);
-            for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091)
+            $_size1063 = 0;
+            $_etype1066 = 0;
+            $xfer += $input->readListBegin($_etype1066, $_size1063);
+            for ($_i1067 = 0; $_i1067 < $_size1063; ++$_i1067)
             {
-              $elem1092 = null;
-              $elem1092 = new \metastore\Partition();
-              $xfer += $elem1092->read($input);
-              $this->success []= $elem1092;
+              $elem1068 = null;
+              $elem1068 = new \metastore\Partition();
+              $xfer += $elem1068->read($input);
+              $this->success []= $elem1068;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29486,9 +29434,9 @@ class ThriftHiveMetastore_exchange_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1093)
+          foreach ($this->success as $iter1069)
           {
-            $xfer += $iter1093->write($output);
+            $xfer += $iter1069->write($output);
           }
         }
         $output->writeListEnd();
@@ -29634,14 +29582,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1094 = 0;
-            $_etype1097 = 0;
-            $xfer += $input->readListBegin($_etype1097, $_size1094);
-            for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098)
+            $_size1070 = 0;
+            $_etype1073 = 0;
+            $xfer += $input->readListBegin($_etype1073, $_size1070);
+            for ($_i1074 = 0; $_i1074 < $_size1070; ++$_i1074)
             {
-              $elem1099 = null;
-              $xfer += $input->readString($elem1099);
-              $this->part_vals []= $elem1099;
+              $elem1075 = null;
+              $xfer += $input->readString($elem1075);
+              $this->part_vals []= $elem1075;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29658,14 +29606,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1100 = 0;
-            $_etype1103 = 0;
-            $xfer += $input->readListBegin($_etype1103, $_size1100);
-            for ($_i1104 = 0; $_i1104 < $_size1100; ++$_i1104)
+            $_size1076 = 0;
+            $_etype1079 = 0;
+            $xfer += $input->readListBegin($_etype1079, $_size1076);
+            for ($_i1080 = 0; $_i1080 < $_size1076; ++$_i1080)
             {
-              $elem1105 = null;
-              $xfer += $input->readString($elem1105);
-              $this->group_names []= $elem1105;
+              $elem1081 = null;
+              $xfer += $input->readString($elem1081);
+              $this->group_names []= $elem1081;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -29703,9 +29651,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1106)
+          foreach ($this->part_vals as $iter1082)
           {
-            $xfer += $output->writeString($iter1106);
+            $xfer += $output->writeString($iter1082);
           }
         }
         $output->writeListEnd();
@@ -29725,9 +29673,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1107)
+          foreach ($this->group_names as $iter1083)
           {
-            $xfer += $output->writeString($iter1107);
+            $xfer += $output->writeString($iter1083);
           }
         }
         $output->writeListEnd();
@@ -30318,15 +30266,15 @@ class ThriftHiveMetastore_get_partitions_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1108 = 0;
-            $_etype1111 = 0;
-            $xfer += $input->readListBegin($_etype1111, $_size1108);
-            for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112)
+            $_size1084 = 0;
+            $_etype1087 = 0;
+            $xfer += $input->readListBegin($_etype1087, $_size1084);
+            for ($_i1088 = 0; $_i1088 < $_size1084; ++$_i1088)
             {
-              $elem1113 = null;
-              $elem1113 = new \metastore\Partition();
-              $xfer += $elem1113->read($input);
-              $this->success []= $elem1113;
+              $elem1089 = null;
+              $elem1089 = new \metastore\Partition();
+              $xfer += $elem1089->read($input);
+              $this->success []= $elem1089;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30370,9 +30318,9 @@ class ThriftHiveMetastore_get_partitions_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1114)
+          foreach ($this->success as $iter1090)
           {
-            $xfer += $iter1114->write($output);
+            $xfer += $iter1090->write($output);
           }
         }
         $output->writeListEnd();
@@ -30518,14 +30466,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
         case 5:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1115 = 0;
-            $_etype1118 = 0;
-            $xfer += $input->readListBegin($_etype1118, $_size1115);
-            for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119)
+            $_size1091 = 0;
+            $_etype1094 = 0;
+            $xfer += $input->readListBegin($_etype1094, $_size1091);
+            for ($_i1095 = 0; $_i1095 < $_size1091; ++$_i1095)
             {
-              $elem1120 = null;
-              $xfer += $input->readString($elem1120);
-              $this->group_names []= $elem1120;
+              $elem1096 = null;
+              $xfer += $input->readString($elem1096);
+              $this->group_names []= $elem1096;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30573,9 +30521,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1121)
+          foreach ($this->group_names as $iter1097)
           {
-            $xfer += $output->writeString($iter1121);
+            $xfer += $output->writeString($iter1097);
           }
         }
         $output->writeListEnd();
@@ -30664,15 +30612,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1122 = 0;
-            $_etype1125 = 0;
-            $xfer += $input->readListBegin($_etype1125, $_size1122);
-            for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126)
+            $_size1098 = 0;
+            $_etype1101 = 0;
+            $xfer += $input->readListBegin($_etype1101, $_size1098);
+            for ($_i1102 = 0; $_i1102 < $_size1098; ++$_i1102)
             {
-              $elem1127 = null;
-              $elem1127 = new \metastore\Partition();
-              $xfer += $elem1127->read($input);
-              $this->success []= $elem1127;
+              $elem1103 = null;
+              $elem1103 = new \metastore\Partition();
+              $xfer += $elem1103->read($input);
+              $this->success []= $elem1103;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30716,9 +30664,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1128)
+          foreach ($this->success as $iter1104)
           {
-            $xfer += $iter1128->write($output);
+            $xfer += $iter1104->write($output);
           }
         }
         $output->writeListEnd();
@@ -30938,15 +30886,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1129 = 0;
-            $_etype1132 = 0;
-            $xfer += $input->readListBegin($_etype1132, $_size1129);
-            for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133)
+            $_size1105 = 0;
+            $_etype1108 = 0;
+            $xfer += $input->readListBegin($_etype1108, $_size1105);
+            for ($_i1109 = 0; $_i1109 < $_size1105; ++$_i1109)
             {
-              $elem1134 = null;
-              $elem1134 = new \metastore\PartitionSpec();
-              $xfer += $elem1134->read($input);
-              $this->success []= $elem1134;
+              $elem1110 = null;
+              $elem1110 = new \metastore\PartitionSpec();
+              $xfer += $elem1110->read($input);
+              $this->success []= $elem1110;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -30990,9 +30938,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1135)
+          foreach ($this->success as $iter1111)
           {
-            $xfer += $iter1135->write($output);
+            $xfer += $iter1111->write($output);
           }
         }
         $output->writeListEnd();
@@ -31211,14 +31159,14 @@ class ThriftHiveMetastore_get_partition_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1136 = 0;
-            $_etype1139 = 0;
-            $xfer += $input->readListBegin($_etype1139, $_size1136);
-            for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140)
+            $_size1112 = 0;
+            $_etype1115 = 0;
+            $xfer += $input->readListBegin($_etype1115, $_size1112);
+            for ($_i1116 = 0; $_i1116 < $_size1112; ++$_i1116)
             {
-              $elem1141 = null;
-              $xfer += $input->readString($elem1141);
-              $this->success []= $elem1141;
+              $elem1117 = null;
+              $xfer += $input->readString($elem1117);
+              $this->success []= $elem1117;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31262,9 +31210,9 @@ class ThriftHiveMetastore_get_partition_names_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1142)
+          foreach ($this->success as $iter1118)
           {
-            $xfer += $output->writeString($iter1142);
+            $xfer += $output->writeString($iter1118);
           }
         }
         $output->writeListEnd();
@@ -31595,14 +31543,14 @@ class ThriftHiveMetastore_get_partitions_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1143 = 0;
-            $_etype1146 = 0;
-            $xfer += $input->readListBegin($_etype1146, $_size1143);
-            for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147)
+            $_size1119 = 0;
+            $_etype1122 = 0;
+            $xfer += $input->readListBegin($_etype1122, $_size1119);
+            for ($_i1123 = 0; $_i1123 < $_size1119; ++$_i1123)
             {
-              $elem1148 = null;
-              $xfer += $input->readString($elem1148);
-              $this->part_vals []= $elem1148;
+              $elem1124 = null;
+              $xfer += $input->readString($elem1124);
+              $this->part_vals []= $elem1124;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31647,9 +31595,9 @@ class ThriftHiveMetastore_get_partitions_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1149)
+          foreach ($this->part_vals as $iter1125)
           {
-            $xfer += $output->writeString($iter1149);
+            $xfer += $output->writeString($iter1125);
           }
         }
         $output->writeListEnd();
@@ -31743,15 +31691,15 @@ class ThriftHiveMetastore_get_partitions_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1150 = 0;
-            $_etype1153 = 0;
-            $xfer += $input->readListBegin($_etype1153, $_size1150);
-            for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154)
+            $_size1126 = 0;
+            $_etype1129 = 0;
+            $xfer += $input->readListBegin($_etype1129, $_size1126);
+            for ($_i1130 = 0; $_i1130 < $_size1126; ++$_i1130)
             {
-              $elem1155 = null;
-              $elem1155 = new \metastore\Partition();
-              $xfer += $elem1155->read($input);
-              $this->success []= $elem1155;
+              $elem1131 = null;
+              $elem1131 = new \metastore\Partition();
+              $xfer += $elem1131->read($input);
+              $this->success []= $elem1131;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31795,9 +31743,9 @@ class ThriftHiveMetastore_get_partitions_ps_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1156)
+          foreach ($this->success as $iter1132)
           {
-            $xfer += $iter1156->write($output);
+            $xfer += $iter1132->write($output);
           }
         }
         $output->writeListEnd();
@@ -31944,14 +31892,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1157 = 0;
-            $_etype1160 = 0;
-            $xfer += $input->readListBegin($_etype1160, $_size1157);
-            for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161)
+            $_size1133 = 0;
+            $_etype1136 = 0;
+            $xfer += $input->readListBegin($_etype1136, $_size1133);
+            for ($_i1137 = 0; $_i1137 < $_size1133; ++$_i1137)
             {
-              $elem1162 = null;
-              $xfer += $input->readString($elem1162);
-              $this->part_vals []= $elem1162;
+              $elem1138 = null;
+              $xfer += $input->readString($elem1138);
+              $this->part_vals []= $elem1138;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -31975,14 +31923,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
         case 6:
           if ($ftype == TType::LST) {
             $this->group_names = array();
-            $_size1163 = 0;
-            $_etype1166 = 0;
-            $xfer += $input->readListBegin($_etype1166, $_size1163);
-            for ($_i1167 = 0; $_i1167 < $_size1163; ++$_i1167)
+            $_size1139 = 0;
+            $_etype1142 = 0;
+            $xfer += $input->readListBegin($_etype1142, $_size1139);
+            for ($_i1143 = 0; $_i1143 < $_size1139; ++$_i1143)
             {
-              $elem1168 = null;
-              $xfer += $input->readString($elem1168);
-              $this->group_names []= $elem1168;
+              $elem1144 = null;
+              $xfer += $input->readString($elem1144);
+              $this->group_names []= $elem1144;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32020,9 +31968,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1169)
+          foreach ($this->part_vals as $iter1145)
           {
-            $xfer += $output->writeString($iter1169);
+            $xfer += $output->writeString($iter1145);
           }
         }
         $output->writeListEnd();
@@ -32047,9 +31995,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args {
       {
         $output->writeListBegin(TType::STRING, count($this->group_names));
         {
-          foreach ($this->group_names as $iter1170)
+          foreach ($this->group_names as $iter1146)
           {
-            $xfer += $output->writeString($iter1170);
+            $xfer += $output->writeString($iter1146);
           }
         }
         $output->writeListEnd();
@@ -32138,15 +32086,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1171 = 0;
-            $_etype1174 = 0;
-            $xfer += $input->readListBegin($_etype1174, $_size1171);
-            for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175)
+            $_size1147 = 0;
+            $_etype1150 = 0;
+            $xfer += $input->readListBegin($_etype1150, $_size1147);
+            for ($_i1151 = 0; $_i1151 < $_size1147; ++$_i1151)
             {
-              $elem1176 = null;
-              $elem1176 = new \metastore\Partition();
-              $xfer += $elem1176->read($input);
-              $this->success []= $elem1176;
+              $elem1152 = null;
+              $elem1152 = new \metastore\Partition();
+              $xfer += $elem1152->read($input);
+              $this->success []= $elem1152;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32190,9 +32138,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1177)
+          foreach ($this->success as $iter1153)
           {
-            $xfer += $iter1177->write($output);
+            $xfer += $iter1153->write($output);
           }
         }
         $output->writeListEnd();
@@ -32313,14 +32261,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1178 = 0;
-            $_etype1181 = 0;
-            $xfer += $input->readListBegin($_etype1181, $_size1178);
-            for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182)
+            $_size1154 = 0;
+            $_etype1157 = 0;
+            $xfer += $input->readListBegin($_etype1157, $_size1154);
+            for ($_i1158 = 0; $_i1158 < $_size1154; ++$_i1158)
             {
-              $elem1183 = null;
-              $xfer += $input->readString($elem1183);
-              $this->part_vals []= $elem1183;
+              $elem1159 = null;
+              $xfer += $input->readString($elem1159);
+              $this->part_vals []= $elem1159;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32365,9 +32313,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1184)
+          foreach ($this->part_vals as $iter1160)
           {
-            $xfer += $output->writeString($iter1184);
+            $xfer += $output->writeString($iter1160);
           }
         }
         $output->writeListEnd();
@@ -32460,14 +32408,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1185 = 0;
-            $_etype1188 = 0;
-            $xfer += $input->readListBegin($_etype1188, $_size1185);
-            for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189)
+            $_size1161 = 0;
+            $_etype1164 = 0;
+            $xfer += $input->readListBegin($_etype1164, $_size1161);
+            for ($_i1165 = 0; $_i1165 < $_size1161; ++$_i1165)
             {
-              $elem1190 = null;
-              $xfer += $input->readString($elem1190);
-              $this->success []= $elem1190;
+              $elem1166 = null;
+              $xfer += $input->readString($elem1166);
+              $this->success []= $elem1166;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32511,9 +32459,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1191)
+          foreach ($this->success as $iter1167)
           {
-            $xfer += $output->writeString($iter1191);
+            $xfer += $output->writeString($iter1167);
           }
         }
         $output->writeListEnd();
@@ -32756,15 +32704,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1192 = 0;
-            $_etype1195 = 0;
-            $xfer += $input->readListBegin($_etype1195, $_size1192);
-            for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196)
+            $_size1168 = 0;
+            $_etype1171 = 0;
+            $xfer += $input->readListBegin($_etype1171, $_size1168);
+            for ($_i1172 = 0; $_i1172 < $_size1168; ++$_i1172)
             {
-              $elem1197 = null;
-              $elem1197 = new \metastore\Partition();
-              $xfer += $elem1197->read($input);
-              $this->success []= $elem1197;
+              $elem1173 = null;
+              $elem1173 = new \metastore\Partition();
+              $xfer += $elem1173->read($input);
+              $this->success []= $elem1173;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -32808,9 +32756,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1198)
+          foreach ($this->success as $iter1174)
           {
-            $xfer += $iter1198->write($output);
+            $xfer += $iter1174->write($output);
           }
         }
         $output->writeListEnd();
@@ -33053,15 +33001,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1199 = 0;
-            $_etype1202 = 0;
-            $xfer += $input->readListBegin($_etype1202, $_size1199);
-            for ($_i1203 = 0; $_i1203 < $_size1199; ++$_i1203)
+            $_size1175 = 0;
+            $_etype1178 = 0;
+            $xfer += $input->readListBegin($_etype1178, $_size1175);
+            for ($_i1179 = 0; $_i1179 < $_size1175; ++$_i1179)
             {
-              $elem1204 = null;
-              $elem1204 = new \metastore\PartitionSpec();
-              $xfer += $elem1204->read($input);
-              $this->success []= $elem1204;
+              $elem1180 = null;
+              $elem1180 = new \metastore\PartitionSpec();
+              $xfer += $elem1180->read($input);
+              $this->success []= $elem1180;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33105,9 +33053,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1205)
+          foreach ($this->success as $iter1181)
           {
-            $xfer += $iter1205->write($output);
+            $xfer += $iter1181->write($output);
           }
         }
         $output->writeListEnd();
@@ -33673,14 +33621,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->names = array();
-            $_size1206 = 0;
-            $_etype1209 = 0;
-            $xfer += $input->readListBegin($_etype1209, $_size1206);
-            for ($_i1210 = 0; $_i1210 < $_size1206; ++$_i1210)
+            $_size1182 = 0;
+            $_etype1185 = 0;
+            $xfer += $input->readListBegin($_etype1185, $_size1182);
+            for ($_i1186 = 0; $_i1186 < $_size1182; ++$_i1186)
             {
-              $elem1211 = null;
-              $xfer += $input->readString($elem1211);
-              $this->names []= $elem1211;
+              $elem1187 = null;
+              $xfer += $input->readString($elem1187);
+              $this->names []= $elem1187;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33718,9 +33666,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args {
       {
         $output->writeListBegin(TType::STRING, count($this->names));
         {
-          foreach ($this->names as $iter1212)
+          foreach ($this->names as $iter1188)
           {
-            $xfer += $output->writeString($iter1212);
+            $xfer += $output->writeString($iter1188);
           }
         }
         $output->writeListEnd();
@@ -33809,15 +33757,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1213 = 0;
-            $_etype1216 = 0;
-            $xfer += $input->readListBegin($_etype1216, $_size1213);
-            for ($_i1217 = 0; $_i1217 < $_size1213; ++$_i1217)
+            $_size1189 = 0;
+            $_etype1192 = 0;
+            $xfer += $input->readListBegin($_etype1192, $_size1189);
+            for ($_i1193 = 0; $_i1193 < $_size1189; ++$_i1193)
             {
-              $elem1218 = null;
-              $elem1218 = new \metastore\Partition();
-              $xfer += $elem1218->read($input);
-              $this->success []= $elem1218;
+              $elem1194 = null;
+              $elem1194 = new \metastore\Partition();
+              $xfer += $elem1194->read($input);
+              $this->success []= $elem1194;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -33861,9 +33809,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result {
       {
         $output->writeListBegin(TType::STRUCT, count($this->success));
         {
-          foreach ($this->success as $iter1219)
+          foreach ($this->success as $iter1195)
           {
-            $xfer += $iter1219->write($output);
+            $xfer += $iter1195->write($output);
           }
         }
         $output->writeListEnd();
@@ -34202,15 +34150,15 @@ class ThriftHiveMetastore_alter_partitions_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size1220 = 0;
-            $_etype1223 = 0;
-            $xfer += $input->readListBegin($_etype1223, $_size1220);
-            for ($_i1224 = 0; $_i1224 < $_size1220; ++$_i1224)
+            $_size1196 = 0;
+            $_etype1199 = 0;
+            $xfer += $input->readListBegin($_etype1199, $_size1196);
+            for ($_i1200 = 0; $_i1200 < $_size1196; ++$_i1200)
             {
-              $elem1225 = null;
-              $elem1225 = new \metastore\Partition();
-              $xfer += $elem1225->read($input);
-              $this->new_parts []= $elem1225;
+              $elem1201 = null;
+              $elem1201 = new \metastore\Partition();
+              $xfer += $elem1201->read($input);
+              $this->new_parts []= $elem1201;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34248,9 +34196,9 @@ class ThriftHiveMetastore_alter_partitions_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter1226)
+          foreach ($this->new_parts as $iter1202)
           {
-            $xfer += $iter1226->write($output);
+            $xfer += $iter1202->write($output);
           }
         }
         $output->writeListEnd();
@@ -34465,15 +34413,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->new_parts = array();
-            $_size1227 = 0;
-            $_etype1230 = 0;
-            $xfer += $input->readListBegin($_etype1230, $_size1227);
-            for ($_i1231 = 0; $_i1231 < $_size1227; ++$_i1231)
+            $_size1203 = 0;
+            $_etype1206 = 0;
+            $xfer += $input->readListBegin($_etype1206, $_size1203);
+            for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207)
             {
-              $elem1232 = null;
-              $elem1232 = new \metastore\Partition();
-              $xfer += $elem1232->read($input);
-              $this->new_parts []= $elem1232;
+              $elem1208 = null;
+              $elem1208 = new \metastore\Partition();
+              $xfer += $elem1208->read($input);
+              $this->new_parts []= $elem1208;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -34519,9 +34467,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args {
       {
         $output->writeListBegin(TType::STRUCT, count($this->new_parts));
         {
-          foreach ($this->new_parts as $iter1233)
+          foreach ($this->new_parts as $iter1209)
           {
-            $xfer += $iter1233->write($output);
+            $xfer += $iter1209->write($output);
           }
         }
         $output->writeListEnd();
@@ -34999,14 +34947,14 @@ class ThriftHiveMetastore_rename_partition_args {
         case 3:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1234 = 0;
-            $_etype1237 = 0;
-            $xfer += $input->readListBegin($_etype1237, $_size1234);
-            for ($_i1238 = 0; $_i1238 < $_size1234; ++$_i1238)
+            $_size1210 = 0;
+            $_etype1213 = 0;
+            $xfer += $input->readListBegin($_etype1213, $_size1210);
+            for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214)
             {
-              $elem1239 = null;
-              $xfer += $input->readString($elem1239);
-              $this->part_vals []= $elem1239;
+              $elem1215 = null;
+              $xfer += $input->readString($elem1215);
+              $this->part_vals []= $elem1215;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35052,9 +35000,9 @@ class ThriftHiveMetastore_rename_partition_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1240)
+          foreach ($this->part_vals as $iter1216)
           {
-            $xfer += $output->writeString($iter1240);
+            $xfer += $output->writeString($iter1216);
           }
         }
         $output->writeListEnd();
@@ -35239,14 +35187,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
         case 1:
           if ($ftype == TType::LST) {
             $this->part_vals = array();
-            $_size1241 = 0;
-            $_etype1244 = 0;
-            $xfer += $input->readListBegin($_etype1244, $_size1241);
-            for ($_i1245 = 0; $_i1245 < $_size1241; ++$_i1245)
+            $_size1217 = 0;
+            $_etype1220 = 0;
+            $xfer += $input->readListBegin($_etype1220, $_size1217);
+            for ($_i1221 = 0; $_i1221 < $_size1217; ++$_i1221)
             {
-              $elem1246 = null;
-              $xfer += $input->readString($elem1246);
-              $this->part_vals []= $elem1246;
+              $elem1222 = null;
+              $xfer += $input->readString($elem1222);
+              $this->part_vals []= $elem1222;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -35281,9 +35229,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args {
       {
         $output->writeListBegin(TType::STRING, count($this->part_vals));
         {
-          foreach ($this->part_vals as $iter1247)
+          foreach ($this->part_vals as $iter1223)
           {
-            $xfer += $output->writeString($iter1247);
+            $xfer += $output->writeString($iter1223);
           }
         }
         $output->writeListEnd();
@@ -35737,14 +35685,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result {
         case 0:
           if ($ftype 

<TRUNCATED>

[40/48] hive git commit: HIVE-20116: TezTask is using parent logger (Prasanth Jayachandran reviewed by Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-20116: TezTask is using parent logger (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/4fcf3d72
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/4fcf3d72
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/4fcf3d72

Branch: refs/heads/master-txnstats
Commit: 4fcf3d720855bef3939ac537ff4a4e5616b493fb
Parents: c1337df
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Tue Jul 17 11:11:56 2018 -0700
Committer: Prasanth Jayachandran <pr...@apache.org>
Committed: Tue Jul 17 11:11:56 2018 -0700

----------------------------------------------------------------------
 ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java    | 4 ++++
 .../java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java  | 6 ++++--
 2 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/4fcf3d72/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
index a15482f..f2ed07a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java
@@ -84,6 +84,9 @@ import org.apache.tez.dag.api.client.DAGStatus;
 import org.apache.tez.dag.api.client.StatusGetOpts;
 import org.apache.tez.dag.api.client.VertexStatus;
 import org.json.JSONObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import com.google.common.annotations.VisibleForTesting;
 
 /**
@@ -96,6 +99,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class TezTask extends Task<TezWork> {
 
   private static final String CLASS_NAME = TezTask.class.getName();
+  private static transient Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
   private final PerfLogger perfLogger = SessionState.getPerfLogger();
   private static final String TEZ_MEMORY_RESERVE_FRACTION = "tez.task.scale.memory.reserve-fraction";
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4fcf3d72/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
index b5e8b95..0105fd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/log/LogDivertAppender.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.exec.tez.TezTask;
 import org.apache.hadoop.hive.ql.session.OperationLog;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.core.LogEvent;
@@ -80,8 +81,9 @@ public class LogDivertAppender {
      */
     private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
         join(new String[]{"org.apache.hadoop.mapreduce.JobSubmitter",
-            "org.apache.hadoop.mapreduce.Job", "SessionState", "ReplState", Task.class.getName(),
-            Driver.class.getName(), "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
+          "org.apache.hadoop.mapreduce.Job", "SessionState", "ReplState", Task.class.getName(),
+          TezTask.class.getName(), Driver.class.getName(),
+          "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
 
     /* Patterns that are included in performance logging level.
      * In performance mode, show execution and performance logger messages.
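
The patch above relies on name-based log routing: LogDivertAppender decides which events are diverted to the per-operation log by matching each event's logger name against its include pattern, so a class only shows up there once it logs under its own name and that name is in the pattern. The commit message ("TezTask is using parent logger") describes the situation before the fix. Below is a minimal, self-contained sketch of the idea with illustrative class names; it is not Hive code and assumes only slf4j-api on the classpath.

import java.util.regex.Pattern;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class BaseTask {
  // Logger named after the base class; a subclass that reuses this field
  // emits its events under "BaseTask", not under its own name.
  protected static final Logger LOG = LoggerFactory.getLogger(BaseTask.class);

  void run() {
    LOG.info("base task running");
  }
}

class TezLikeTask extends BaseTask {
  // Shape of the fix above: declare a logger under the subclass's own name
  // so that name-based include patterns can match its events.
  private static final Logger LOG = LoggerFactory.getLogger(TezLikeTask.class);

  @Override
  void run() {
    LOG.info("tez-like task running");
  }
}

public class LoggerRoutingSketch {

  // Stand-in for an include pattern keyed on logger names; the appender in
  // the diff joins class names with "|" in much the same way.
  private static final Pattern INCLUDE = Pattern.compile(
      String.join("|", BaseTask.class.getName(), TezLikeTask.class.getName()));

  static boolean diverted(String loggerName) {
    return INCLUDE.matcher(loggerName).find();
  }

  public static void main(String[] args) {
    new TezLikeTask().run();
    System.out.println(diverted(TezLikeTask.class.getName())); // true
    System.out.println(diverted("SomeOtherTask"));             // false
  }
}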


[08/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
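
Most of the .q.out churn below is operator ids shifting after the patch; the semijoin reduction being extended is the runtime filtering already visible in these plans, where the big-table scans are gated on min/max bounds and a bloom filter built from the join keys of the filtered small side (the DynamicValue(..._min), DynamicValue(..._max) and in_bloom_filter(...) conditions). The following is a minimal sketch of that filtering idea, with made-up keys and Guava's BloomFilter standing in for Hive's own runtime-filter classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

public class SemijoinReductionSketch {

  public static void main(String[] args) {
    // Small ("dimension") side: the join keys that survive its own filters,
    // e.g. the date_dim rows matching d_year = 1999 in query69 below.
    List<Long> dimKeys = Arrays.asList(20010101L, 20010102L, 20010103L);

    // Build the runtime filter that the DynamicValue predicates consume:
    // min, max, and a bloom filter over the surviving join keys.
    long min = Collections.min(dimKeys);
    long max = Collections.max(dimKeys);
    BloomFilter<Long> bloom = BloomFilter.create(Funnels.longFunnel(), 1_000_000);
    for (long k : dimKeys) {
      bloom.put(k);
    }

    // Big ("fact") side: rows whose key cannot possibly join are dropped at
    // scan time, before the shuffle and the join.
    List<Long> factKeys = Arrays.asList(19991231L, 20010102L, 20020101L);
    List<Long> survivors = new ArrayList<>();
    for (long k : factKeys) {
      if (k >= min && k <= max && bloom.mightContain(k)) {
        survivors.add(k);
      }
    }

    System.out.println(survivors); // prints [20010102]
  }
}
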
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query69.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query69.q.out b/ql/src/test/results/clientpositive/perf/tez/query69.q.out
index a9c7ac3..aad5b81 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query69.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query69.q.out
@@ -117,16 +117,16 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_230]
-        Limit [LIM_229] (rows=100 width=88)
+      File Output Operator [FS_232]
+        Limit [LIM_231] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_228] (rows=191662559 width=88)
+          Select Operator [SEL_230] (rows=191662559 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_227]
-              Select Operator [SEL_226] (rows=191662559 width=88)
+            SHUFFLE [RS_229]
+              Select Operator [SEL_228] (rows=191662559 width=88)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col6"]
-                Group By Operator [GBY_225] (rows=191662559 width=88)
+                Group By Operator [GBY_227] (rows=191662559 width=88)
                   Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2, KEY._col3, KEY._col4
                 <-Reducer 5 [SIMPLE_EDGE]
                   SHUFFLE [RS_67]
@@ -137,51 +137,51 @@ Stage-0
                         Output:["_col6","_col7","_col8","_col9","_col10"]
                         Filter Operator [FIL_64] (rows=383325119 width=88)
                           predicate:_col14 is null
-                          Merge Join Operator [MERGEJOIN_178] (rows=766650239 width=88)
-                            Conds:RS_61._col0=RS_224._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col14"]
+                          Merge Join Operator [MERGEJOIN_180] (rows=766650239 width=88)
+                            Conds:RS_61._col0=RS_226._col0(Left Outer),Output:["_col6","_col7","_col8","_col9","_col10","_col14"]
                           <-Reducer 19 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_224]
+                            FORWARD [RS_226]
                               PartitionCols:_col0
-                              Select Operator [SEL_223] (rows=158394413 width=135)
+                              Select Operator [SEL_225] (rows=158394413 width=135)
                                 Output:["_col0","_col1"]
-                                Group By Operator [GBY_222] (rows=158394413 width=135)
+                                Group By Operator [GBY_224] (rows=158394413 width=135)
                                   Output:["_col0"],keys:KEY._col0
                                 <-Reducer 18 [SIMPLE_EDGE]
                                   SHUFFLE [RS_58]
                                     PartitionCols:_col0
                                     Group By Operator [GBY_57] (rows=316788826 width=135)
                                       Output:["_col0"],keys:_col1
-                                      Merge Join Operator [MERGEJOIN_176] (rows=316788826 width=135)
-                                        Conds:RS_221._col0=RS_194._col0(Inner),Output:["_col1"]
+                                      Merge Join Operator [MERGEJOIN_178] (rows=316788826 width=135)
+                                        Conds:RS_223._col0=RS_196._col0(Inner),Output:["_col1"]
                                       <-Map 13 [SIMPLE_EDGE] vectorized
-                                        PARTITION_ONLY_SHUFFLE [RS_194]
+                                        PARTITION_ONLY_SHUFFLE [RS_196]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_189] (rows=4058 width=1119)
+                                          Select Operator [SEL_191] (rows=4058 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_188] (rows=4058 width=1119)
+                                            Filter Operator [FIL_190] (rows=4058 width=1119)
                                               predicate:((d_year = 1999) and d_date_sk is not null and d_moy BETWEEN 1 AND 3)
                                               TableScan [TS_12] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                       <-Map 22 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_221]
+                                        SHUFFLE [RS_223]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_220] (rows=287989836 width=135)
+                                          Select Operator [SEL_222] (rows=287989836 width=135)
                                             Output:["_col0","_col1"]
-                                            Filter Operator [FIL_219] (rows=287989836 width=135)
+                                            Filter Operator [FIL_221] (rows=287989836 width=135)
                                               predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_54_date_dim_d_date_sk_min) AND DynamicValue(RS_54_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_54_date_dim_d_date_sk_bloom_filter))) and cs_ship_customer_sk is not null and cs_sold_date_sk is not null)
                                               TableScan [TS_47] (rows=287989836 width=135)
                                                 default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_customer_sk"]
                                               <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_218]
-                                                  Group By Operator [GBY_217] (rows=1 width=12)
+                                                BROADCAST [RS_220]
+                                                  Group By Operator [GBY_219] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    PARTITION_ONLY_SHUFFLE [RS_201]
-                                                      Group By Operator [GBY_198] (rows=1 width=12)
+                                                    PARTITION_ONLY_SHUFFLE [RS_203]
+                                                      Group By Operator [GBY_200] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_195] (rows=4058 width=1119)
+                                                        Select Operator [SEL_197] (rows=4058 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_189]
+                                                           Please refer to the previous Select Operator [SEL_191]
                           <-Reducer 4 [ONE_TO_ONE_EDGE]
                             FORWARD [RS_61]
                               PartitionCols:_col0
@@ -189,42 +189,42 @@ Stage-0
                                 Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
                                 Filter Operator [FIL_45] (rows=696954748 width=88)
                                   predicate:_col12 is null
-                                  Merge Join Operator [MERGEJOIN_177] (rows=1393909496 width=88)
-                                    Conds:RS_41._col0=RS_42._col0(Left Semi),RS_41._col0=RS_216._col0(Left Outer),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col12"]
+                                  Merge Join Operator [MERGEJOIN_179] (rows=1393909496 width=88)
+                                    Conds:RS_41._col0=RS_42._col0(Left Semi),RS_41._col0=RS_218._col0(Left Outer),Output:["_col0","_col6","_col7","_col8","_col9","_col10","_col12"]
                                   <-Reducer 3 [SIMPLE_EDGE]
                                     PARTITION_ONLY_SHUFFLE [RS_41]
                                       PartitionCols:_col0
-                                      Merge Join Operator [MERGEJOIN_173] (rows=96800003 width=860)
-                                        Conds:RS_36._col1=RS_187._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
+                                      Merge Join Operator [MERGEJOIN_175] (rows=96800003 width=860)
+                                        Conds:RS_36._col1=RS_189._col0(Inner),Output:["_col0","_col6","_col7","_col8","_col9","_col10"]
                                       <-Map 10 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_187]
+                                        SHUFFLE [RS_189]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_186] (rows=1861800 width=385)
+                                          Select Operator [SEL_188] (rows=1861800 width=385)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                                            Filter Operator [FIL_185] (rows=1861800 width=385)
+                                            Filter Operator [FIL_187] (rows=1861800 width=385)
                                               predicate:cd_demo_sk is not null
                                               TableScan [TS_6] (rows=1861800 width=385)
                                                 default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_gender","cd_marital_status","cd_education_status","cd_purchase_estimate","cd_credit_rating"]
                                       <-Reducer 2 [SIMPLE_EDGE]
                                         SHUFFLE [RS_36]
                                           PartitionCols:_col1
-                                          Merge Join Operator [MERGEJOIN_172] (rows=88000001 width=860)
-                                            Conds:RS_181._col2=RS_184._col0(Inner),Output:["_col0","_col1"]
+                                          Merge Join Operator [MERGEJOIN_174] (rows=88000001 width=860)
+                                            Conds:RS_183._col2=RS_186._col0(Inner),Output:["_col0","_col1"]
                                           <-Map 1 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_181]
+                                            SHUFFLE [RS_183]
                                               PartitionCols:_col2
-                                              Select Operator [SEL_180] (rows=80000000 width=860)
+                                              Select Operator [SEL_182] (rows=80000000 width=860)
                                                 Output:["_col0","_col1","_col2"]
-                                                Filter Operator [FIL_179] (rows=80000000 width=860)
+                                                Filter Operator [FIL_181] (rows=80000000 width=860)
                                                   predicate:(c_current_addr_sk is not null and c_current_cdemo_sk is not null and c_customer_sk is not null)
                                                   TableScan [TS_0] (rows=80000000 width=860)
                                                     default@customer,c,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_cdemo_sk","c_current_addr_sk"]
                                           <-Map 9 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_184]
+                                            SHUFFLE [RS_186]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_183] (rows=20000000 width=1014)
+                                              Select Operator [SEL_185] (rows=20000000 width=1014)
                                                 Output:["_col0"]
-                                                Filter Operator [FIL_182] (rows=20000000 width=1014)
+                                                Filter Operator [FIL_184] (rows=20000000 width=1014)
                                                   predicate:((ca_state) IN ('CO', 'IL', 'MN') and ca_address_sk is not null)
                                                   TableScan [TS_3] (rows=40000000 width=1014)
                                                     default@customer_address,ca,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_state"]
@@ -235,35 +235,35 @@ Stage-0
                                         Output:["_col0"],keys:_col0
                                         Select Operator [SEL_18] (rows=633595212 width=88)
                                           Output:["_col0"]
-                                          Merge Join Operator [MERGEJOIN_174] (rows=633595212 width=88)
-                                            Conds:RS_208._col0=RS_190._col0(Inner),Output:["_col1"]
+                                          Merge Join Operator [MERGEJOIN_176] (rows=633595212 width=88)
+                                            Conds:RS_210._col0=RS_192._col0(Inner),Output:["_col1"]
                                           <-Map 13 [SIMPLE_EDGE] vectorized
-                                            PARTITION_ONLY_SHUFFLE [RS_190]
+                                            PARTITION_ONLY_SHUFFLE [RS_192]
                                               PartitionCols:_col0
-                                               Please refer to the previous Select Operator [SEL_189]
+                                               Please refer to the previous Select Operator [SEL_191]
                                           <-Map 11 [SIMPLE_EDGE] vectorized
-                                            SHUFFLE [RS_208]
+                                            SHUFFLE [RS_210]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_207] (rows=575995635 width=88)
+                                              Select Operator [SEL_209] (rows=575995635 width=88)
                                                 Output:["_col0","_col1"]
-                                                Filter Operator [FIL_206] (rows=575995635 width=88)
+                                                Filter Operator [FIL_208] (rows=575995635 width=88)
                                                   predicate:((ss_customer_sk BETWEEN DynamicValue(RS_41_c_c_customer_sk_min) AND DynamicValue(RS_41_c_c_customer_sk_max) and in_bloom_filter(ss_customer_sk, DynamicValue(RS_41_c_c_customer_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
                                                   TableScan [TS_9] (rows=575995635 width=88)
                                                     default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk"]
                                                   <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_203]
-                                                      Group By Operator [GBY_202] (rows=1 width=12)
+                                                    BROADCAST [RS_205]
+                                                      Group By Operator [GBY_204] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                       <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_199]
-                                                          Group By Operator [GBY_196] (rows=1 width=12)
+                                                        PARTITION_ONLY_SHUFFLE [RS_201]
+                                                          Group By Operator [GBY_198] (rows=1 width=12)
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                            Select Operator [SEL_191] (rows=4058 width=1119)
+                                                            Select Operator [SEL_193] (rows=4058 width=1119)
                                                               Output:["_col0"]
-                                                               Please refer to the previous Select Operator [SEL_189]
+                                                               Please refer to the previous Select Operator [SEL_191]
                                                   <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                                    BROADCAST [RS_205]
-                                                      Group By Operator [GBY_204] (rows=1 width=12)
+                                                    BROADCAST [RS_207]
+                                                      Group By Operator [GBY_206] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=96800000)"]
                                                       <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
                                                         PARTITION_ONLY_SHUFFLE [RS_137]
@@ -271,43 +271,43 @@ Stage-0
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=96800000)"]
                                                             Select Operator [SEL_135] (rows=96800003 width=860)
                                                               Output:["_col0"]
-                                                               Please refer to the previous Merge Join Operator [MERGEJOIN_173]
+                                                               Please refer to the previous Merge Join Operator [MERGEJOIN_175]
                                   <-Reducer 16 [ONE_TO_ONE_EDGE] vectorized
-                                    FORWARD [RS_216]
+                                    FORWARD [RS_218]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_215] (rows=79201469 width=135)
+                                      Select Operator [SEL_217] (rows=79201469 width=135)
                                         Output:["_col0","_col1"]
-                                        Group By Operator [GBY_214] (rows=79201469 width=135)
+                                        Group By Operator [GBY_216] (rows=79201469 width=135)
                                           Output:["_col0"],keys:KEY._col0
                                         <-Reducer 15 [SIMPLE_EDGE]
                                           SHUFFLE [RS_30]
                                             PartitionCols:_col0
                                             Group By Operator [GBY_29] (rows=158402938 width=135)
                                               Output:["_col0"],keys:_col1
-                                              Merge Join Operator [MERGEJOIN_175] (rows=158402938 width=135)
-                                                Conds:RS_213._col0=RS_192._col0(Inner),Output:["_col1"]
+                                              Merge Join Operator [MERGEJOIN_177] (rows=158402938 width=135)
+                                                Conds:RS_215._col0=RS_194._col0(Inner),Output:["_col1"]
                                               <-Map 13 [SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_192]
+                                                PARTITION_ONLY_SHUFFLE [RS_194]
                                                   PartitionCols:_col0
-                                                   Please refer to the previous Select Operator [SEL_189]
+                                                   Please refer to the previous Select Operator [SEL_191]
                                               <-Map 21 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_213]
+                                                SHUFFLE [RS_215]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_212] (rows=144002668 width=135)
+                                                  Select Operator [SEL_214] (rows=144002668 width=135)
                                                     Output:["_col0","_col1"]
-                                                    Filter Operator [FIL_211] (rows=144002668 width=135)
+                                                    Filter Operator [FIL_213] (rows=144002668 width=135)
                                                       predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_26_date_dim_d_date_sk_min) AND DynamicValue(RS_26_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_26_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_sold_date_sk is not null)
                                                       TableScan [TS_19] (rows=144002668 width=135)
                                                         default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_bill_customer_sk"]
                                                       <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                                        BROADCAST [RS_210]
-                                                          Group By Operator [GBY_209] (rows=1 width=12)
+                                                        BROADCAST [RS_212]
+                                                          Group By Operator [GBY_211] (rows=1 width=12)
                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                           <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                            PARTITION_ONLY_SHUFFLE [RS_200]
-                                                              Group By Operator [GBY_197] (rows=1 width=12)
+                                                            PARTITION_ONLY_SHUFFLE [RS_202]
+                                                              Group By Operator [GBY_199] (rows=1 width=12)
                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                Select Operator [SEL_193] (rows=4058 width=1119)
+                                                                Select Operator [SEL_195] (rows=4058 width=1119)
                                                                   Output:["_col0"]
-                                                                   Please refer to the previous Select Operator [SEL_189]
+                                                                   Please refer to the previous Select Operator [SEL_191]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query72.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query72.q.out b/ql/src/test/results/clientpositive/perf/tez/query72.q.out
index 48682e3..65a60ea 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query72.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query72.q.out
@@ -86,14 +86,14 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_311]
-        Limit [LIM_310] (rows=100 width=135)
+      File Output Operator [FS_315]
+        Limit [LIM_314] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_309] (rows=37725837 width=135)
+          Select Operator [SEL_313] (rows=37725837 width=135)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_308]
-              Group By Operator [GBY_307] (rows=37725837 width=135)
+            SHUFFLE [RS_312]
+              Group By Operator [GBY_311] (rows=37725837 width=135)
                 Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(VALUE._col0)","count(VALUE._col1)","count(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_69]
@@ -102,14 +102,14 @@ Stage-0
                     Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["count(_col3)","count(_col4)","count()"],keys:_col0, _col1, _col2
                     Select Operator [SEL_66] (rows=75451675 width=135)
                       Output:["_col0","_col1","_col2","_col3","_col4"]
-                      Merge Join Operator [MERGEJOIN_247] (rows=75451675 width=135)
-                        Conds:RS_63._col4, _col6=RS_306._col0, _col1(Left Outer),Output:["_col13","_col15","_col22","_col28"]
+                      Merge Join Operator [MERGEJOIN_251] (rows=75451675 width=135)
+                        Conds:RS_63._col4, _col6=RS_310._col0, _col1(Left Outer),Output:["_col13","_col15","_col22","_col28"]
                       <-Map 29 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_306]
+                        SHUFFLE [RS_310]
                           PartitionCols:_col0, _col1
-                          Select Operator [SEL_305] (rows=28798881 width=106)
+                          Select Operator [SEL_309] (rows=28798881 width=106)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_304] (rows=28798881 width=106)
+                            Filter Operator [FIL_308] (rows=28798881 width=106)
                               predicate:cr_item_sk is not null
                               TableScan [TS_60] (rows=28798881 width=106)
                                 default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number"]
@@ -118,14 +118,14 @@ Stage-0
                           PartitionCols:_col4, _col6
                           Select Operator [SEL_59] (rows=68592431 width=135)
                             Output:["_col4","_col6","_col13","_col15","_col22","_col28"]
-                            Merge Join Operator [MERGEJOIN_246] (rows=68592431 width=135)
-                              Conds:RS_56._col0, _col20=RS_303._col0, _col1(Inner),Output:["_col5","_col9","_col14","_col16","_col20","_col26"]
+                            Merge Join Operator [MERGEJOIN_250] (rows=68592431 width=135)
+                              Conds:RS_56._col0, _col20=RS_307._col0, _col1(Inner),Output:["_col5","_col9","_col14","_col16","_col20","_col26"]
                             <-Map 28 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_303]
+                              SHUFFLE [RS_307]
                                 PartitionCols:_col0, _col1
-                                Select Operator [SEL_302] (rows=73049 width=1119)
+                                Select Operator [SEL_306] (rows=73049 width=1119)
                                   Output:["_col0","_col1"]
-                                  Filter Operator [FIL_301] (rows=73049 width=1119)
+                                  Filter Operator [FIL_305] (rows=73049 width=1119)
                                     predicate:(d_date_sk is not null and d_week_seq is not null)
                                     TableScan [TS_46] (rows=73049 width=1119)
                                       default@date_dim,d2,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_week_seq"]
@@ -134,28 +134,28 @@ Stage-0
                                 PartitionCols:_col0, _col20
                                 Filter Operator [FIL_55] (rows=62356755 width=135)
                                   predicate:(_col3 < _col17)
-                                  Merge Join Operator [MERGEJOIN_245] (rows=187070265 width=135)
+                                  Merge Join Operator [MERGEJOIN_249] (rows=187070265 width=135)
                                     Conds:RS_52._col1=RS_53._col8(Inner),Output:["_col0","_col3","_col5","_col9","_col14","_col16","_col17","_col20","_col26"]
                                   <-Reducer 2 [SIMPLE_EDGE]
                                     PARTITION_ONLY_SHUFFLE [RS_52]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_238] (rows=41342400 width=15)
-                                        Conds:RS_250._col2=RS_253._col0(Inner),Output:["_col0","_col1","_col3","_col5"]
+                                      Merge Join Operator [MERGEJOIN_242] (rows=41342400 width=15)
+                                        Conds:RS_254._col2=RS_257._col0(Inner),Output:["_col0","_col1","_col3","_col5"]
                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_250]
+                                        SHUFFLE [RS_254]
                                           PartitionCols:_col2
-                                          Select Operator [SEL_249] (rows=37584000 width=15)
+                                          Select Operator [SEL_253] (rows=37584000 width=15)
                                             Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_248] (rows=37584000 width=15)
+                                            Filter Operator [FIL_252] (rows=37584000 width=15)
                                               predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_warehouse_sk is not null)
                                               TableScan [TS_0] (rows=37584000 width=15)
                                                 default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                       <-Map 9 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_253]
+                                        SHUFFLE [RS_257]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_252] (rows=27 width=1029)
+                                          Select Operator [SEL_256] (rows=27 width=1029)
                                             Output:["_col0","_col1"]
-                                            Filter Operator [FIL_251] (rows=27 width=1029)
+                                            Filter Operator [FIL_255] (rows=27 width=1029)
                                               predicate:w_warehouse_sk is not null
                                               TableScan [TS_3] (rows=27 width=1029)
                                                 default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name"]
@@ -166,152 +166,152 @@ Stage-0
                                         Output:["_col3","_col8","_col10","_col11","_col14","_col20"]
                                         Filter Operator [FIL_44] (rows=170063874 width=135)
                                           predicate:(UDFToDouble(_col20) > (UDFToDouble(_col9) + 5.0D))
-                                          Merge Join Operator [MERGEJOIN_244] (rows=510191624 width=135)
-                                            Conds:RS_41._col1=RS_290._col0(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col16","_col18","_col20"]
+                                          Merge Join Operator [MERGEJOIN_248] (rows=510191624 width=135)
+                                            Conds:RS_41._col1=RS_294._col0(Inner),Output:["_col4","_col6","_col7","_col9","_col10","_col16","_col18","_col20"]
                                           <-Map 26 [SIMPLE_EDGE] vectorized
-                                            PARTITION_ONLY_SHUFFLE [RS_290]
+                                            PARTITION_ONLY_SHUFFLE [RS_294]
                                               PartitionCols:_col0
-                                              Select Operator [SEL_289] (rows=73049 width=1119)
+                                              Select Operator [SEL_293] (rows=73049 width=1119)
                                                 Output:["_col0","_col1"]
-                                                Filter Operator [FIL_288] (rows=73049 width=1119)
+                                                Filter Operator [FIL_292] (rows=73049 width=1119)
                                                   predicate:d_date_sk is not null
                                                   TableScan [TS_23] (rows=73049 width=1119)
                                                     default@date_dim,d3,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                           <-Reducer 15 [SIMPLE_EDGE]
                                             SHUFFLE [RS_41]
                                               PartitionCols:_col1
-                                              Merge Join Operator [MERGEJOIN_243] (rows=463810558 width=135)
-                                                Conds:RS_38._col4=RS_280._col0(Inner),Output:["_col1","_col4","_col6","_col7","_col9","_col10","_col16","_col18"]
+                                              Merge Join Operator [MERGEJOIN_247] (rows=463810558 width=135)
+                                                Conds:RS_38._col4=RS_284._col0(Inner),Output:["_col1","_col4","_col6","_col7","_col9","_col10","_col16","_col18"]
                                               <-Map 24 [SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_280]
+                                                PARTITION_ONLY_SHUFFLE [RS_284]
                                                   PartitionCols:_col0
-                                                  Select Operator [SEL_279] (rows=462000 width=1436)
+                                                  Select Operator [SEL_283] (rows=462000 width=1436)
                                                     Output:["_col0","_col1"]
-                                                    Filter Operator [FIL_278] (rows=462000 width=1436)
+                                                    Filter Operator [FIL_282] (rows=462000 width=1436)
                                                       predicate:i_item_sk is not null
                                                       TableScan [TS_20] (rows=462000 width=1436)
                                                         default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_desc"]
                                               <-Reducer 14 [SIMPLE_EDGE]
                                                 SHUFFLE [RS_38]
                                                   PartitionCols:_col4
-                                                  Merge Join Operator [MERGEJOIN_242] (rows=421645953 width=135)
-                                                    Conds:RS_35._col5=RS_300._col0(Left Outer),Output:["_col1","_col4","_col6","_col7","_col9","_col10","_col16"]
+                                                  Merge Join Operator [MERGEJOIN_246] (rows=421645953 width=135)
+                                                    Conds:RS_35._col5=RS_304._col0(Left Outer),Output:["_col1","_col4","_col6","_col7","_col9","_col10","_col16"]
                                                   <-Map 23 [SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_300]
+                                                    SHUFFLE [RS_304]
                                                       PartitionCols:_col0
-                                                      Select Operator [SEL_299] (rows=2300 width=1179)
+                                                      Select Operator [SEL_303] (rows=2300 width=1179)
                                                         Output:["_col0"]
                                                         TableScan [TS_18] (rows=2300 width=1179)
                                                           default@promotion,promotion,Tbl:COMPLETE,Col:NONE,Output:["p_promo_sk"]
                                                   <-Reducer 13 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_35]
                                                       PartitionCols:_col5
-                                                      Merge Join Operator [MERGEJOIN_241] (rows=383314495 width=135)
-                                                        Conds:RS_32._col3=RS_272._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col9","_col10"]
+                                                      Merge Join Operator [MERGEJOIN_245] (rows=383314495 width=135)
+                                                        Conds:RS_32._col3=RS_276._col0(Inner),Output:["_col1","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                       <-Map 21 [SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_272]
+                                                        PARTITION_ONLY_SHUFFLE [RS_276]
                                                           PartitionCols:_col0
-                                                          Select Operator [SEL_271] (rows=3600 width=107)
+                                                          Select Operator [SEL_275] (rows=3600 width=107)
                                                             Output:["_col0"]
-                                                            Filter Operator [FIL_270] (rows=3600 width=107)
+                                                            Filter Operator [FIL_274] (rows=3600 width=107)
                                                               predicate:((hd_buy_potential = '1001-5000') and hd_demo_sk is not null)
                                                               TableScan [TS_15] (rows=7200 width=107)
                                                                 default@household_demographics,household_demographics,Tbl:COMPLETE,Col:NONE,Output:["hd_demo_sk","hd_buy_potential"]
                                                       <-Reducer 12 [SIMPLE_EDGE]
                                                         SHUFFLE [RS_32]
                                                           PartitionCols:_col3
-                                                          Merge Join Operator [MERGEJOIN_240] (rows=348467716 width=135)
-                                                            Conds:RS_29._col2=RS_264._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
+                                                          Merge Join Operator [MERGEJOIN_244] (rows=348467716 width=135)
+                                                            Conds:RS_29._col2=RS_268._col0(Inner),Output:["_col1","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                           <-Map 19 [SIMPLE_EDGE] vectorized
-                                                            PARTITION_ONLY_SHUFFLE [RS_264]
+                                                            PARTITION_ONLY_SHUFFLE [RS_268]
                                                               PartitionCols:_col0
-                                                              Select Operator [SEL_263] (rows=930900 width=385)
+                                                              Select Operator [SEL_267] (rows=930900 width=385)
                                                                 Output:["_col0"]
-                                                                Filter Operator [FIL_262] (rows=930900 width=385)
+                                                                Filter Operator [FIL_266] (rows=930900 width=385)
                                                                   predicate:((cd_marital_status = 'M') and cd_demo_sk is not null)
                                                                   TableScan [TS_12] (rows=1861800 width=385)
                                                                     default@customer_demographics,customer_demographics,Tbl:COMPLETE,Col:NONE,Output:["cd_demo_sk","cd_marital_status"]
                                                           <-Reducer 11 [SIMPLE_EDGE]
                                                             SHUFFLE [RS_29]
                                                               PartitionCols:_col2
-                                                              Merge Join Operator [MERGEJOIN_239] (rows=316788826 width=135)
-                                                                Conds:RS_298._col0=RS_256._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
+                                                              Merge Join Operator [MERGEJOIN_243] (rows=316788826 width=135)
+                                                                Conds:RS_302._col0=RS_260._col0(Inner),Output:["_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10"]
                                                               <-Map 17 [SIMPLE_EDGE] vectorized
-                                                                PARTITION_ONLY_SHUFFLE [RS_256]
+                                                                PARTITION_ONLY_SHUFFLE [RS_260]
                                                                   PartitionCols:_col0
-                                                                  Select Operator [SEL_255] (rows=36524 width=1119)
+                                                                  Select Operator [SEL_259] (rows=36524 width=1119)
                                                                     Output:["_col0","_col1","_col2"]
-                                                                    Filter Operator [FIL_254] (rows=36524 width=1119)
+                                                                    Filter Operator [FIL_258] (rows=36524 width=1119)
                                                                       predicate:((d_year = 2001) and d_date_sk is not null and d_week_seq is not null)
                                                                       TableScan [TS_9] (rows=73049 width=1119)
                                                                         default@date_dim,d1,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date","d_week_seq","d_year"]
                                                               <-Map 10 [SIMPLE_EDGE] vectorized
-                                                                SHUFFLE [RS_298]
+                                                                SHUFFLE [RS_302]
                                                                   PartitionCols:_col0
-                                                                  Select Operator [SEL_297] (rows=287989836 width=135)
+                                                                  Select Operator [SEL_301] (rows=287989836 width=135)
                                                                     Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                                                                    Filter Operator [FIL_296] (rows=287989836 width=135)
+                                                                    Filter Operator [FIL_300] (rows=287989836 width=135)
                                                                       predicate:((cs_bill_cdemo_sk BETWEEN DynamicValue(RS_30_customer_demographics_cd_demo_sk_min) AND DynamicValue(RS_30_customer_demographics_cd_demo_sk_max) and in_bloom_filter(cs_bill_cdemo_sk, DynamicValue(RS_30_customer_demographics_cd_demo_sk_bloom_filter))) and (cs_bill_hdemo_sk BETWEEN DynamicValue(RS_33_household_demographics_hd_demo_sk_min) AND DynamicValue(RS_33_household_demographics_hd_demo_sk_max) and in_bloom_filter(cs_bill_hdemo_sk, DynamicValue(RS_33_household_demographics_hd_demo_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_39_item_i_item_sk_min) AND DynamicValue(RS_39_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_39_item_i_item_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_52_inventory_inv_item_sk_min) AND DynamicValue(RS_52_inventory_inv_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_52_inventory_inv_item_sk_bloom_filter))) and (cs_ship
 _date_sk BETWEEN DynamicValue(RS_42_d3_d_date_sk_min) AND DynamicValue(RS_42_d3_d_date_sk_max) and in_bloom_filter(cs_ship_date_sk, DynamicValue(RS_42_d3_d_date_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_27_d1_d_date_sk_min) AND DynamicValue(RS_27_d1_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_27_d1_d_date_sk_bloom_filter))) and cs_bill_cdemo_sk is not null and cs_bill_hdemo_sk is not null and cs_item_sk is not null and cs_ship_date_sk is not null and cs_sold_date_sk is not null)
                                                                       TableScan [TS_6] (rows=287989836 width=135)
                                                                         default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_ship_date_sk","cs_bill_cdemo_sk","cs_bill_hdemo_sk","cs_item_sk","cs_promo_sk","cs_order_number","cs_quantity"]
                                                                       <-Reducer 18 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_261]
-                                                                          Group By Operator [GBY_260] (rows=1 width=12)
+                                                                        BROADCAST [RS_265]
+                                                                          Group By Operator [GBY_264] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                           <-Map 17 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_259]
-                                                                              Group By Operator [GBY_258] (rows=1 width=12)
+                                                                            PARTITION_ONLY_SHUFFLE [RS_263]
+                                                                              Group By Operator [GBY_262] (rows=1 width=12)
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                Select Operator [SEL_257] (rows=36524 width=1119)
+                                                                                Select Operator [SEL_261] (rows=36524 width=1119)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Select Operator [SEL_255]
+                                                                                   Please refer to the previous Select Operator [SEL_259]
                                                                       <-Reducer 20 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_269]
-                                                                          Group By Operator [GBY_268] (rows=1 width=12)
+                                                                        BROADCAST [RS_273]
+                                                                          Group By Operator [GBY_272] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                           <-Map 19 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_267]
-                                                                              Group By Operator [GBY_266] (rows=1 width=12)
+                                                                            PARTITION_ONLY_SHUFFLE [RS_271]
+                                                                              Group By Operator [GBY_270] (rows=1 width=12)
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                Select Operator [SEL_265] (rows=930900 width=385)
+                                                                                Select Operator [SEL_269] (rows=930900 width=385)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Select Operator [SEL_263]
+                                                                                   Please refer to the previous Select Operator [SEL_267]
                                                                       <-Reducer 22 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_277]
-                                                                          Group By Operator [GBY_276] (rows=1 width=12)
+                                                                        BROADCAST [RS_281]
+                                                                          Group By Operator [GBY_280] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                           <-Map 21 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_275]
-                                                                              Group By Operator [GBY_274] (rows=1 width=12)
+                                                                            PARTITION_ONLY_SHUFFLE [RS_279]
+                                                                              Group By Operator [GBY_278] (rows=1 width=12)
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                Select Operator [SEL_273] (rows=3600 width=107)
+                                                                                Select Operator [SEL_277] (rows=3600 width=107)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Select Operator [SEL_271]
+                                                                                   Please refer to the previous Select Operator [SEL_275]
                                                                       <-Reducer 25 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_285]
-                                                                          Group By Operator [GBY_284] (rows=1 width=12)
+                                                                        BROADCAST [RS_289]
+                                                                          Group By Operator [GBY_288] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                           <-Map 24 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_283]
-                                                                              Group By Operator [GBY_282] (rows=1 width=12)
+                                                                            PARTITION_ONLY_SHUFFLE [RS_287]
+                                                                              Group By Operator [GBY_286] (rows=1 width=12)
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                Select Operator [SEL_281] (rows=462000 width=1436)
+                                                                                Select Operator [SEL_285] (rows=462000 width=1436)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Select Operator [SEL_279]
+                                                                                   Please refer to the previous Select Operator [SEL_283]
                                                                       <-Reducer 27 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_295]
-                                                                          Group By Operator [GBY_294] (rows=1 width=12)
+                                                                        BROADCAST [RS_299]
+                                                                          Group By Operator [GBY_298] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                           <-Map 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_293]
-                                                                              Group By Operator [GBY_292] (rows=1 width=12)
+                                                                            PARTITION_ONLY_SHUFFLE [RS_297]
+                                                                              Group By Operator [GBY_296] (rows=1 width=12)
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                Select Operator [SEL_291] (rows=73049 width=1119)
+                                                                                Select Operator [SEL_295] (rows=73049 width=1119)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Select Operator [SEL_289]
+                                                                                   Please refer to the previous Select Operator [SEL_293]
                                                                       <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                                                        BROADCAST [RS_287]
-                                                                          Group By Operator [GBY_286] (rows=1 width=12)
+                                                                        BROADCAST [RS_291]
+                                                                          Group By Operator [GBY_290] (rows=1 width=12)
                                                                             Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=41342400)"]
                                                                           <-Reducer 2 [CUSTOM_SIMPLE_EDGE]
                                                                             PARTITION_ONLY_SHUFFLE [RS_174]
@@ -319,5 +319,5 @@ Stage-0
                                                                                 Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=41342400)"]
                                                                                 Select Operator [SEL_172] (rows=41342400 width=15)
                                                                                   Output:["_col0"]
-                                                                                   Please refer to the previous Merge Join Operator [MERGEJOIN_238]
+                                                                                   Please refer to the previous Merge Join Operator [MERGEJOIN_242]
 


[14/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query2.q.out b/ql/src/test/results/clientpositive/perf/tez/query2.q.out
index d24899c..5f90894 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query2.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query2.q.out
@@ -139,33 +139,33 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_191]
-        Select Operator [SEL_190] (rows=287491028 width=135)
+      File Output Operator [FS_195]
+        Select Operator [SEL_194] (rows=287491028 width=135)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
         <-Reducer 6 [SIMPLE_EDGE]
           SHUFFLE [RS_58]
             Select Operator [SEL_57] (rows=287491028 width=135)
               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-              Merge Join Operator [MERGEJOIN_143] (rows=287491028 width=135)
+              Merge Join Operator [MERGEJOIN_147] (rows=287491028 width=135)
                 Conds:RS_54._col0=RS_55.(_col0 - 53)(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col9","_col10","_col11","_col12","_col13","_col14","_col15"]
               <-Reducer 13 [SIMPLE_EDGE]
                 SHUFFLE [RS_55]
                   PartitionCols:(_col0 - 53)
-                  Merge Join Operator [MERGEJOIN_142] (rows=261355475 width=135)
-                    Conds:RS_189._col0=RS_187._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                  Merge Join Operator [MERGEJOIN_146] (rows=261355475 width=135)
+                    Conds:RS_193._col0=RS_191._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                   <-Map 15 [SIMPLE_EDGE] vectorized
-                    SHUFFLE [RS_187]
+                    SHUFFLE [RS_191]
                       PartitionCols:_col0
-                      Select Operator [SEL_185] (rows=36524 width=1119)
+                      Select Operator [SEL_189] (rows=36524 width=1119)
                         Output:["_col0"]
-                        Filter Operator [FIL_183] (rows=36524 width=1119)
+                        Filter Operator [FIL_187] (rows=36524 width=1119)
                           predicate:((d_year = 2002) and d_week_seq is not null)
                           TableScan [TS_20] (rows=73049 width=1119)
                             default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_week_seq","d_year"]
                   <-Reducer 12 [ONE_TO_ONE_EDGE] vectorized
-                    FORWARD [RS_189]
+                    FORWARD [RS_193]
                       PartitionCols:_col0
-                      Group By Operator [GBY_188] (rows=237595882 width=135)
+                      Group By Operator [GBY_192] (rows=237595882 width=135)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)"],keys:KEY._col0
                       <-Reducer 11 [SIMPLE_EDGE]
                         SHUFFLE [RS_44]
@@ -174,67 +174,67 @@ Stage-0
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)","sum(_col5)","sum(_col6)","sum(_col7)"],keys:_col0
                             Select Operator [SEL_41] (rows=475191764 width=135)
                               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                              Merge Join Operator [MERGEJOIN_141] (rows=475191764 width=135)
-                                Conds:Union 17._col0=RS_168._col0(Inner),Output:["_col1","_col3","_col4"]
+                              Merge Join Operator [MERGEJOIN_145] (rows=475191764 width=135)
+                                Conds:Union 17._col0=RS_172._col0(Inner),Output:["_col1","_col3","_col4"]
                               <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_168]
+                                SHUFFLE [RS_172]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_165] (rows=73049 width=1119)
+                                  Select Operator [SEL_169] (rows=73049 width=1119)
                                     Output:["_col0","_col1","_col2"]
-                                    Filter Operator [FIL_164] (rows=73049 width=1119)
+                                    Filter Operator [FIL_168] (rows=73049 width=1119)
                                       predicate:(d_date_sk is not null and d_week_seq is not null)
                                       TableScan [TS_8] (rows=73049 width=1119)
                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_week_seq","d_day_name"]
                               <-Union 17 [SIMPLE_EDGE]
                                 <-Map 16 [CONTAINS] vectorized
-                                  Reduce Output Operator [RS_200]
+                                  Reduce Output Operator [RS_204]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_199] (rows=144002668 width=135)
+                                    Select Operator [SEL_203] (rows=144002668 width=135)
                                       Output:["_col0","_col1"]
-                                      Filter Operator [FIL_198] (rows=144002668 width=135)
+                                      Filter Operator [FIL_202] (rows=144002668 width=135)
                                         predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_39_date_dim_d_date_sk_min) AND DynamicValue(RS_39_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_39_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
-                                        TableScan [TS_154] (rows=144002668 width=135)
+                                        TableScan [TS_158] (rows=144002668 width=135)
                                           Output:["ws_sold_date_sk","ws_ext_sales_price"]
                                         <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_196]
-                                            Group By Operator [GBY_195] (rows=1 width=12)
+                                          BROADCAST [RS_200]
+                                            Group By Operator [GBY_199] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                             <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_173]
-                                                Group By Operator [GBY_171] (rows=1 width=12)
+                                              SHUFFLE [RS_177]
+                                                Group By Operator [GBY_175] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_169] (rows=73049 width=1119)
+                                                  Select Operator [SEL_173] (rows=73049 width=1119)
                                                     Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_165]
+                                                     Please refer to the previous Select Operator [SEL_169]
                                 <-Map 18 [CONTAINS] vectorized
-                                  Reduce Output Operator [RS_203]
+                                  Reduce Output Operator [RS_207]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_202] (rows=287989836 width=135)
+                                    Select Operator [SEL_206] (rows=287989836 width=135)
                                       Output:["_col0","_col1"]
-                                      Filter Operator [FIL_201] (rows=287989836 width=135)
+                                      Filter Operator [FIL_205] (rows=287989836 width=135)
                                         predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_39_date_dim_d_date_sk_min) AND DynamicValue(RS_39_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_39_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
-                                        TableScan [TS_159] (rows=287989836 width=135)
+                                        TableScan [TS_163] (rows=287989836 width=135)
                                           Output:["cs_sold_date_sk","cs_ext_sales_price"]
                                         <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_197]
-                                             Please refer to the previous Group By Operator [GBY_195]
+                                          BROADCAST [RS_201]
+                                             Please refer to the previous Group By Operator [GBY_199]
               <-Reducer 5 [ONE_TO_ONE_EDGE]
                 FORWARD [RS_54]
                   PartitionCols:_col0
-                  Merge Join Operator [MERGEJOIN_140] (rows=261355475 width=135)
-                    Conds:RS_181._col0=RS_186._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+                  Merge Join Operator [MERGEJOIN_144] (rows=261355475 width=135)
+                    Conds:RS_185._col0=RS_190._col0(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
                   <-Map 15 [SIMPLE_EDGE] vectorized
-                    SHUFFLE [RS_186]
+                    SHUFFLE [RS_190]
                       PartitionCols:_col0
-                      Select Operator [SEL_184] (rows=36524 width=1119)
+                      Select Operator [SEL_188] (rows=36524 width=1119)
                         Output:["_col0"]
-                        Filter Operator [FIL_182] (rows=36524 width=1119)
+                        Filter Operator [FIL_186] (rows=36524 width=1119)
                           predicate:((d_year = 2001) and d_week_seq is not null)
                            Please refer to the previous TableScan [TS_20]
                   <-Reducer 4 [ONE_TO_ONE_EDGE] vectorized
-                    FORWARD [RS_181]
+                    FORWARD [RS_185]
                       PartitionCols:_col0
-                      Group By Operator [GBY_180] (rows=237595882 width=135)
+                      Group By Operator [GBY_184] (rows=237595882 width=135)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)","sum(VALUE._col4)","sum(VALUE._col5)","sum(VALUE._col6)"],keys:KEY._col0
                       <-Reducer 3 [SIMPLE_EDGE]
                         SHUFFLE [RS_17]
@@ -243,43 +243,43 @@ Stage-0
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"],aggregations:["sum(_col1)","sum(_col2)","sum(_col3)","sum(_col4)","sum(_col5)","sum(_col6)","sum(_col7)"],keys:_col0
                             Select Operator [SEL_14] (rows=475191764 width=135)
                               Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
-                              Merge Join Operator [MERGEJOIN_139] (rows=475191764 width=135)
-                                Conds:Union 2._col0=RS_166._col0(Inner),Output:["_col1","_col3","_col4"]
+                              Merge Join Operator [MERGEJOIN_143] (rows=475191764 width=135)
+                                Conds:Union 2._col0=RS_170._col0(Inner),Output:["_col1","_col3","_col4"]
                               <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_166]
+                                SHUFFLE [RS_170]
                                   PartitionCols:_col0
-                                   Please refer to the previous Select Operator [SEL_165]
+                                   Please refer to the previous Select Operator [SEL_169]
                               <-Union 2 [SIMPLE_EDGE]
                                 <-Map 1 [CONTAINS] vectorized
-                                  Reduce Output Operator [RS_179]
+                                  Reduce Output Operator [RS_183]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_178] (rows=144002668 width=135)
+                                    Select Operator [SEL_182] (rows=144002668 width=135)
                                       Output:["_col0","_col1"]
-                                      Filter Operator [FIL_177] (rows=144002668 width=135)
+                                      Filter Operator [FIL_181] (rows=144002668 width=135)
                                         predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_12_date_dim_d_date_sk_min) AND DynamicValue(RS_12_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_12_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
-                                        TableScan [TS_144] (rows=144002668 width=135)
+                                        TableScan [TS_148] (rows=144002668 width=135)
                                           Output:["ws_sold_date_sk","ws_ext_sales_price"]
                                         <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_175]
-                                            Group By Operator [GBY_174] (rows=1 width=12)
+                                          BROADCAST [RS_179]
+                                            Group By Operator [GBY_178] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                             <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_172]
-                                                Group By Operator [GBY_170] (rows=1 width=12)
+                                              SHUFFLE [RS_176]
+                                                Group By Operator [GBY_174] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                  Select Operator [SEL_167] (rows=73049 width=1119)
+                                                  Select Operator [SEL_171] (rows=73049 width=1119)
                                                     Output:["_col0"]
-                                                     Please refer to the previous Select Operator [SEL_165]
+                                                     Please refer to the previous Select Operator [SEL_169]
                                 <-Map 8 [CONTAINS] vectorized
-                                  Reduce Output Operator [RS_194]
+                                  Reduce Output Operator [RS_198]
                                     PartitionCols:_col0
-                                    Select Operator [SEL_193] (rows=287989836 width=135)
+                                    Select Operator [SEL_197] (rows=287989836 width=135)
                                       Output:["_col0","_col1"]
-                                      Filter Operator [FIL_192] (rows=287989836 width=135)
+                                      Filter Operator [FIL_196] (rows=287989836 width=135)
                                         predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_12_date_dim_d_date_sk_min) AND DynamicValue(RS_12_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_12_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
-                                        TableScan [TS_149] (rows=287989836 width=135)
+                                        TableScan [TS_153] (rows=287989836 width=135)
                                           Output:["cs_sold_date_sk","cs_ext_sales_price"]
                                         <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                          BROADCAST [RS_176]
-                                             Please refer to the previous Group By Operator [GBY_174]
+                                          BROADCAST [RS_180]
+                                             Please refer to the previous Group By Operator [GBY_178]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query23.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query23.q.out b/ql/src/test/results/clientpositive/perf/tez/query23.q.out
index 6725bec..aab3f93 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query23.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query23.q.out
@@ -1,5 +1,5 @@
-Warning: Shuffle Join MERGEJOIN[581][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 27' is a cross product
-Warning: Shuffle Join MERGEJOIN[583][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 35' is a cross product
+Warning: Shuffle Join MERGEJOIN[585][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 28' is a cross product
+Warning: Shuffle Join MERGEJOIN[587][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 36' is a cross product
 PREHOOK: query: explain
 with frequent_ss_items as 
  (select substr(i_item_desc,1,30) itemdesc,i_item_sk item_sk,d_date solddate,count(*) cnt
@@ -107,11 +107,11 @@ Plan optimized by CBO.
 Vertex dependency in root stage
 Map 1 <- Reducer 17 (BROADCAST_EDGE), Reducer 8 (BROADCAST_EDGE)
 Map 13 <- Reducer 19 (BROADCAST_EDGE), Reducer 21 (BROADCAST_EDGE)
-Map 22 <- Reducer 30 (BROADCAST_EDGE)
-Map 42 <- Reducer 12 (BROADCAST_EDGE)
-Map 43 <- Reducer 37 (BROADCAST_EDGE)
+Map 23 <- Reducer 31 (BROADCAST_EDGE)
+Map 43 <- Reducer 12 (BROADCAST_EDGE), Reducer 22 (BROADCAST_EDGE)
+Map 44 <- Reducer 38 (BROADCAST_EDGE)
 Reducer 10 <- Reducer 16 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE)
-Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 35 (SIMPLE_EDGE), Union 5 (CONTAINS)
+Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 36 (SIMPLE_EDGE), Union 5 (CONTAINS)
 Reducer 12 <- Map 7 (CUSTOM_SIMPLE_EDGE)
 Reducer 14 <- Map 13 (SIMPLE_EDGE), Map 18 (SIMPLE_EDGE)
 Reducer 15 <- Map 20 (SIMPLE_EDGE), Reducer 14 (SIMPLE_EDGE)
@@ -120,64 +120,65 @@ Reducer 17 <- Reducer 16 (CUSTOM_SIMPLE_EDGE)
 Reducer 19 <- Map 18 (CUSTOM_SIMPLE_EDGE)
 Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
 Reducer 21 <- Map 20 (CUSTOM_SIMPLE_EDGE)
-Reducer 23 <- Map 22 (SIMPLE_EDGE), Map 29 (SIMPLE_EDGE)
-Reducer 24 <- Map 41 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE)
-Reducer 25 <- Reducer 24 (SIMPLE_EDGE)
-Reducer 26 <- Reducer 25 (CUSTOM_SIMPLE_EDGE)
-Reducer 27 <- Reducer 26 (CUSTOM_SIMPLE_EDGE), Reducer 28 (CUSTOM_SIMPLE_EDGE), Reducer 40 (CUSTOM_SIMPLE_EDGE)
-Reducer 28 <- Reducer 25 (CUSTOM_SIMPLE_EDGE)
+Reducer 22 <- Map 20 (CUSTOM_SIMPLE_EDGE)
+Reducer 24 <- Map 23 (SIMPLE_EDGE), Map 30 (SIMPLE_EDGE)
+Reducer 25 <- Map 42 (SIMPLE_EDGE), Reducer 24 (SIMPLE_EDGE)
+Reducer 26 <- Reducer 25 (SIMPLE_EDGE)
+Reducer 27 <- Reducer 26 (CUSTOM_SIMPLE_EDGE)
+Reducer 28 <- Reducer 27 (CUSTOM_SIMPLE_EDGE), Reducer 29 (CUSTOM_SIMPLE_EDGE), Reducer 41 (CUSTOM_SIMPLE_EDGE)
+Reducer 29 <- Reducer 26 (CUSTOM_SIMPLE_EDGE)
 Reducer 3 <- Reducer 16 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE)
-Reducer 30 <- Map 29 (CUSTOM_SIMPLE_EDGE)
-Reducer 31 <- Map 29 (SIMPLE_EDGE), Map 43 (SIMPLE_EDGE)
-Reducer 32 <- Map 41 (SIMPLE_EDGE), Reducer 31 (SIMPLE_EDGE)
-Reducer 33 <- Reducer 32 (SIMPLE_EDGE)
-Reducer 34 <- Reducer 33 (CUSTOM_SIMPLE_EDGE)
-Reducer 35 <- Reducer 34 (CUSTOM_SIMPLE_EDGE), Reducer 36 (CUSTOM_SIMPLE_EDGE), Reducer 40 (CUSTOM_SIMPLE_EDGE)
-Reducer 36 <- Reducer 33 (CUSTOM_SIMPLE_EDGE)
-Reducer 37 <- Map 29 (CUSTOM_SIMPLE_EDGE)
-Reducer 39 <- Map 38 (SIMPLE_EDGE), Map 41 (SIMPLE_EDGE)
-Reducer 4 <- Reducer 27 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE), Union 5 (CONTAINS)
-Reducer 40 <- Reducer 39 (SIMPLE_EDGE)
+Reducer 31 <- Map 30 (CUSTOM_SIMPLE_EDGE)
+Reducer 32 <- Map 30 (SIMPLE_EDGE), Map 44 (SIMPLE_EDGE)
+Reducer 33 <- Map 42 (SIMPLE_EDGE), Reducer 32 (SIMPLE_EDGE)
+Reducer 34 <- Reducer 33 (SIMPLE_EDGE)
+Reducer 35 <- Reducer 34 (CUSTOM_SIMPLE_EDGE)
+Reducer 36 <- Reducer 35 (CUSTOM_SIMPLE_EDGE), Reducer 37 (CUSTOM_SIMPLE_EDGE), Reducer 41 (CUSTOM_SIMPLE_EDGE)
+Reducer 37 <- Reducer 34 (CUSTOM_SIMPLE_EDGE)
+Reducer 38 <- Map 30 (CUSTOM_SIMPLE_EDGE)
+Reducer 4 <- Reducer 28 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE), Union 5 (CONTAINS)
+Reducer 40 <- Map 39 (SIMPLE_EDGE), Map 42 (SIMPLE_EDGE)
+Reducer 41 <- Reducer 40 (SIMPLE_EDGE)
 Reducer 6 <- Union 5 (CUSTOM_SIMPLE_EDGE)
 Reducer 8 <- Map 7 (CUSTOM_SIMPLE_EDGE)
-Reducer 9 <- Map 42 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
+Reducer 9 <- Map 43 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE)
 
 Stage-0
   Fetch Operator
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_684]
-        Limit [LIM_683] (rows=1 width=112)
+      File Output Operator [FS_691]
+        Limit [LIM_690] (rows=1 width=112)
           Number of rows:100
-          Group By Operator [GBY_682] (rows=1 width=112)
+          Group By Operator [GBY_689] (rows=1 width=112)
             Output:["_col0"],aggregations:["sum(VALUE._col0)"]
           <-Union 5 [CUSTOM_SIMPLE_EDGE]
             <-Reducer 11 [CONTAINS]
-              Reduce Output Operator [RS_594]
-                Group By Operator [GBY_593] (rows=1 width=112)
+              Reduce Output Operator [RS_598]
+                Group By Operator [GBY_597] (rows=1 width=112)
                   Output:["_col0"],aggregations:["sum(_col0)"]
-                  Select Operator [SEL_591] (rows=191667562 width=135)
+                  Select Operator [SEL_595] (rows=191667562 width=135)
                     Output:["_col0"]
-                    Merge Join Operator [MERGEJOIN_590] (rows=191667562 width=135)
+                    Merge Join Operator [MERGEJOIN_594] (rows=191667562 width=135)
                       Conds:RS_244._col2=RS_245._col0(Inner),Output:["_col3","_col4"]
                     <-Reducer 10 [SIMPLE_EDGE]
                       SHUFFLE [RS_244]
                         PartitionCols:_col2
-                        Merge Join Operator [MERGEJOIN_580] (rows=174243235 width=135)
-                          Conds:RS_241._col1=RS_633._col0(Inner),Output:["_col2","_col3","_col4"]
+                        Merge Join Operator [MERGEJOIN_584] (rows=174243235 width=135)
+                          Conds:RS_241._col1=RS_640._col0(Inner),Output:["_col2","_col3","_col4"]
                         <-Reducer 16 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_633]
+                          SHUFFLE [RS_640]
                             PartitionCols:_col0
-                            Group By Operator [GBY_630] (rows=58079562 width=88)
+                            Group By Operator [GBY_637] (rows=58079562 width=88)
                               Output:["_col0"],keys:_col1
-                              Select Operator [SEL_629] (rows=116159124 width=88)
+                              Select Operator [SEL_636] (rows=116159124 width=88)
                                 Output:["_col1"]
-                                Filter Operator [FIL_628] (rows=116159124 width=88)
+                                Filter Operator [FIL_635] (rows=116159124 width=88)
                                   predicate:(_col3 > 4L)
-                                  Select Operator [SEL_627] (rows=348477374 width=88)
+                                  Select Operator [SEL_634] (rows=348477374 width=88)
                                     Output:["_col0","_col3"]
-                                    Group By Operator [GBY_626] (rows=348477374 width=88)
+                                    Group By Operator [GBY_633] (rows=348477374 width=88)
                                       Output:["_col0","_col1","_col2","_col3"],aggregations:["count(VALUE._col0)"],keys:KEY._col0, KEY._col1, KEY._col2
                                     <-Reducer 15 [SIMPLE_EDGE]
                                       SHUFFLE [RS_24]
@@ -186,350 +187,361 @@ Stage-0
                                           Output:["_col0","_col1","_col2","_col3"],aggregations:["count()"],keys:_col1, _col0, _col2
                                           Select Operator [SEL_21] (rows=696954748 width=88)
                                             Output:["_col0","_col1","_col2"]
-                                            Merge Join Operator [MERGEJOIN_565] (rows=696954748 width=88)
-                                              Conds:RS_18._col1=RS_617._col0(Inner),Output:["_col3","_col5","_col6"]
+                                            Merge Join Operator [MERGEJOIN_569] (rows=696954748 width=88)
+                                              Conds:RS_18._col1=RS_621._col0(Inner),Output:["_col3","_col5","_col6"]
                                             <-Map 20 [SIMPLE_EDGE] vectorized
-                                              PARTITION_ONLY_SHUFFLE [RS_617]
+                                              PARTITION_ONLY_SHUFFLE [RS_621]
                                                 PartitionCols:_col0
-                                                Select Operator [SEL_616] (rows=462000 width=1436)
+                                                Select Operator [SEL_620] (rows=462000 width=1436)
                                                   Output:["_col0","_col1"]
-                                                  Filter Operator [FIL_615] (rows=462000 width=1436)
+                                                  Filter Operator [FIL_619] (rows=462000 width=1436)
                                                     predicate:i_item_sk is not null
                                                     TableScan [TS_12] (rows=462000 width=1436)
                                                       default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_desc"]
                                             <-Reducer 14 [SIMPLE_EDGE]
                                               SHUFFLE [RS_18]
                                                 PartitionCols:_col1
-                                                Merge Join Operator [MERGEJOIN_564] (rows=633595212 width=88)
-                                                  Conds:RS_625._col0=RS_609._col0(Inner),Output:["_col1","_col3"]
+                                                Merge Join Operator [MERGEJOIN_568] (rows=633595212 width=88)
+                                                  Conds:RS_632._col0=RS_613._col0(Inner),Output:["_col1","_col3"]
                                                 <-Map 18 [SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_609]
+                                                  PARTITION_ONLY_SHUFFLE [RS_613]
                                                     PartitionCols:_col0
-                                                    Select Operator [SEL_608] (rows=36525 width=1119)
+                                                    Select Operator [SEL_612] (rows=36525 width=1119)
                                                       Output:["_col0","_col1"]
-                                                      Filter Operator [FIL_607] (rows=36525 width=1119)
+                                                      Filter Operator [FIL_611] (rows=36525 width=1119)
                                                         predicate:((d_year) IN (1999, 2000, 2001, 2002) and d_date_sk is not null)
                                                         TableScan [TS_9] (rows=73049 width=1119)
                                                           default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date","d_year"]
                                                 <-Map 13 [SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_625]
+                                                  SHUFFLE [RS_632]
                                                     PartitionCols:_col0
-                                                    Select Operator [SEL_624] (rows=575995635 width=88)
+                                                    Select Operator [SEL_631] (rows=575995635 width=88)
                                                       Output:["_col0","_col1"]
-                                                      Filter Operator [FIL_623] (rows=575995635 width=88)
+                                                      Filter Operator [FIL_630] (rows=575995635 width=88)
                                                         predicate:((ss_item_sk BETWEEN DynamicValue(RS_19_item_i_item_sk_min) AND DynamicValue(RS_19_item_i_item_sk_max) and in_bloom_filter(ss_item_sk, DynamicValue(RS_19_item_i_item_sk_bloom_filter))) and (ss_sold_date_sk BETWEEN DynamicValue(RS_16_date_dim_d_date_sk_min) AND DynamicValue(RS_16_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_16_date_dim_d_date_sk_bloom_filter))) and ss_item_sk is not null and ss_sold_date_sk is not null)
                                                         TableScan [TS_6] (rows=575995635 width=88)
                                                           default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk"]
                                                         <-Reducer 19 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_614]
-                                                            Group By Operator [GBY_613] (rows=1 width=12)
+                                                          BROADCAST [RS_618]
+                                                            Group By Operator [GBY_617] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                             <-Map 18 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              PARTITION_ONLY_SHUFFLE [RS_612]
-                                                                Group By Operator [GBY_611] (rows=1 width=12)
+                                                              PARTITION_ONLY_SHUFFLE [RS_616]
+                                                                Group By Operator [GBY_615] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                  Select Operator [SEL_610] (rows=36525 width=1119)
+                                                                  Select Operator [SEL_614] (rows=36525 width=1119)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_608]
+                                                                     Please refer to the previous Select Operator [SEL_612]
                                                         <-Reducer 21 [BROADCAST_EDGE] vectorized
-                                                          BROADCAST [RS_622]
-                                                            Group By Operator [GBY_621] (rows=1 width=12)
+                                                          BROADCAST [RS_629]
+                                                            Group By Operator [GBY_628] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                             <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                              PARTITION_ONLY_SHUFFLE [RS_620]
-                                                                Group By Operator [GBY_619] (rows=1 width=12)
+                                                              PARTITION_ONLY_SHUFFLE [RS_626]
+                                                                Group By Operator [GBY_624] (rows=1 width=12)
                                                                   Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                  Select Operator [SEL_618] (rows=462000 width=1436)
+                                                                  Select Operator [SEL_622] (rows=462000 width=1436)
                                                                     Output:["_col0"]
-                                                                     Please refer to the previous Select Operator [SEL_616]
+                                                                     Please refer to the previous Select Operator [SEL_620]
                         <-Reducer 9 [SIMPLE_EDGE]
                           SHUFFLE [RS_241]
                             PartitionCols:_col1
-                            Merge Join Operator [MERGEJOIN_571] (rows=158402938 width=135)
-                              Conds:RS_689._col0=RS_599._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                            Merge Join Operator [MERGEJOIN_575] (rows=158402938 width=135)
+                              Conds:RS_698._col0=RS_603._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                             <-Map 7 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_599]
+                              SHUFFLE [RS_603]
                                 PartitionCols:_col0
-                                Select Operator [SEL_596] (rows=18262 width=1119)
+                                Select Operator [SEL_600] (rows=18262 width=1119)
                                   Output:["_col0"]
-                                  Filter Operator [FIL_595] (rows=18262 width=1119)
+                                  Filter Operator [FIL_599] (rows=18262 width=1119)
                                     predicate:((d_moy = 1) and (d_year = 1999) and d_date_sk is not null)
                                     TableScan [TS_3] (rows=73049 width=1119)
                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
-                            <-Map 42 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_689]
+                            <-Map 43 [SIMPLE_EDGE] vectorized
+                              SHUFFLE [RS_698]
                                 PartitionCols:_col0
-                                Select Operator [SEL_688] (rows=144002668 width=135)
+                                Select Operator [SEL_697] (rows=144002668 width=135)
                                   Output:["_col0","_col1","_col2","_col3","_col4"]
-                                  Filter Operator [FIL_687] (rows=144002668 width=135)
-                                    predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_239_date_dim_d_date_sk_min) AND DynamicValue(RS_239_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_239_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
+                                  Filter Operator [FIL_696] (rows=144002668 width=135)
+                                    predicate:((ws_item_sk BETWEEN DynamicValue(RS_143_item_i_item_sk_min) AND DynamicValue(RS_143_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_143_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_239_date_dim_d_date_sk_min) AND DynamicValue(RS_239_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_239_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
                                     TableScan [TS_124] (rows=144002668 width=135)
                                       default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk","ws_quantity","ws_list_price"]
                                     <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_686]
-                                        Group By Operator [GBY_685] (rows=1 width=12)
+                                      BROADCAST [RS_693]
+                                        Group By Operator [GBY_692] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                         <-Map 7 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_604]
-                                            Group By Operator [GBY_602] (rows=1 width=12)
+                                          SHUFFLE [RS_608]
+                                            Group By Operator [GBY_606] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_600] (rows=18262 width=1119)
+                                              Select Operator [SEL_604] (rows=18262 width=1119)
                                                 Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_596]
-                    <-Reducer 35 [SIMPLE_EDGE]
+                                                 Please refer to the previous Select Operator [SEL_600]
+                                    <-Reducer 22 [BROADCAST_EDGE] vectorized
+                                      BROADCAST [RS_695]
+                                        Group By Operator [GBY_694] (rows=1 width=12)
+                                          Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
+                                        <-Map 20 [CUSTOM_SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_627]
+                                            Group By Operator [GBY_625] (rows=1 width=12)
+                                              Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
+                                              Select Operator [SEL_623] (rows=462000 width=1436)
+                                                Output:["_col0"]
+                                                 Please refer to the previous Select Operator [SEL_620]
+                    <-Reducer 36 [SIMPLE_EDGE]
                       SHUFFLE [RS_245]
                         PartitionCols:_col0
                         Select Operator [SEL_237] (rows=105599202 width=433)
                           Output:["_col0"]
                           Filter Operator [FIL_236] (rows=105599202 width=433)
                             predicate:(_col3 > (0.95 * _col1))
-                            Merge Join Operator [MERGEJOIN_583] (rows=316797606 width=433)
+                            Merge Join Operator [MERGEJOIN_587] (rows=316797606 width=433)
                               Conds:(Inner),(Inner),Output:["_col1","_col2","_col3"]
-                            <-Reducer 40 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_681]
-                                Group By Operator [GBY_679] (rows=316797606 width=88)
+                            <-Reducer 41 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_688]
+                                Group By Operator [GBY_686] (rows=316797606 width=88)
                                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                                <-Reducer 39 [SIMPLE_EDGE]
+                                <-Reducer 40 [SIMPLE_EDGE]
                                   SHUFFLE [RS_105]
                                     PartitionCols:_col0
                                     Group By Operator [GBY_104] (rows=633595212 width=88)
                                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
                                       Select Operator [SEL_102] (rows=633595212 width=88)
                                         Output:["_col0","_col1"]
-                                        Merge Join Operator [MERGEJOIN_570] (rows=633595212 width=88)
-                                          Conds:RS_678._col0=RS_658._col0(Inner),Output:["_col1","_col2","_col3"]
-                                        <-Map 41 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_658]
+                                        Merge Join Operator [MERGEJOIN_574] (rows=633595212 width=88)
+                                          Conds:RS_685._col0=RS_665._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        <-Map 42 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_665]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_657] (rows=80000000 width=860)
+                                            Select Operator [SEL_664] (rows=80000000 width=860)
                                               Output:["_col0"]
-                                              Filter Operator [FIL_656] (rows=80000000 width=860)
+                                              Filter Operator [FIL_663] (rows=80000000 width=860)
                                                 predicate:c_customer_sk is not null
                                                 TableScan [TS_96] (rows=80000000 width=860)
                                                   default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk"]
-                                        <-Map 38 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_678]
+                                        <-Map 39 [SIMPLE_EDGE] vectorized
+                                          SHUFFLE [RS_685]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_677] (rows=575995635 width=88)
+                                            Select Operator [SEL_684] (rows=575995635 width=88)
                                               Output:["_col0","_col1","_col2"]
-                                              Filter Operator [FIL_676] (rows=575995635 width=88)
+                                              Filter Operator [FIL_683] (rows=575995635 width=88)
                                                 predicate:ss_customer_sk is not null
                                                 TableScan [TS_93] (rows=575995635 width=88)
                                                   default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_customer_sk","ss_quantity","ss_sales_price"]
-                            <-Reducer 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_707]
-                                Select Operator [SEL_706] (rows=1 width=120)
-                                  Filter Operator [FIL_705] (rows=1 width=120)
+                            <-Reducer 35 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_716]
+                                Select Operator [SEL_715] (rows=1 width=120)
+                                  Filter Operator [FIL_714] (rows=1 width=120)
                                     predicate:(sq_count_check(_col0) <= 1)
-                                    Group By Operator [GBY_704] (rows=1 width=120)
+                                    Group By Operator [GBY_713] (rows=1 width=120)
                                       Output:["_col0"],aggregations:["count()"]
-                                      Select Operator [SEL_703] (rows=1 width=120)
-                                        Group By Operator [GBY_702] (rows=1 width=120)
+                                      Select Operator [SEL_712] (rows=1 width=120)
+                                        Group By Operator [GBY_711] (rows=1 width=120)
                                           Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                                        <-Reducer 33 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          PARTITION_ONLY_SHUFFLE [RS_700]
-                                            Group By Operator [GBY_698] (rows=1 width=120)
+                                        <-Reducer 34 [CUSTOM_SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_709]
+                                            Group By Operator [GBY_707] (rows=1 width=120)
                                               Output:["_col0"],aggregations:["count(_col0)"]
-                                              Select Operator [SEL_696] (rows=348477374 width=88)
+                                              Select Operator [SEL_705] (rows=348477374 width=88)
                                                 Output:["_col0"]
-                                                Group By Operator [GBY_695] (rows=348477374 width=88)
+                                                Group By Operator [GBY_704] (rows=348477374 width=88)
                                                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                                                <-Reducer 32 [SIMPLE_EDGE]
+                                                <-Reducer 33 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_175]
                                                     PartitionCols:_col0
                                                     Group By Operator [GBY_174] (rows=696954748 width=88)
                                                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
                                                       Select Operator [SEL_172] (rows=696954748 width=88)
                                                         Output:["_col0","_col1"]
-                                                        Merge Join Operator [MERGEJOIN_575] (rows=696954748 width=88)
-                                                          Conds:RS_169._col1=RS_660._col0(Inner),Output:["_col2","_col3","_col6"]
-                                                        <-Map 41 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_660]
+                                                        Merge Join Operator [MERGEJOIN_579] (rows=696954748 width=88)
+                                                          Conds:RS_169._col1=RS_667._col0(Inner),Output:["_col2","_col3","_col6"]
+                                                        <-Map 42 [SIMPLE_EDGE] vectorized
+                                                          SHUFFLE [RS_667]
                                                             PartitionCols:_col0
-                                                             Please refer to the previous Select Operator [SEL_657]
-                                                        <-Reducer 31 [SIMPLE_EDGE]
+                                                             Please refer to the previous Select Operator [SEL_664]
+                                                        <-Reducer 32 [SIMPLE_EDGE]
                                                           SHUFFLE [RS_169]
                                                             PartitionCols:_col1
-                                                            Merge Join Operator [MERGEJOIN_574] (rows=633595212 width=88)
-                                                              Conds:RS_694._col0=RS_645._col0(Inner),Output:["_col1","_col2","_col3"]
-                                                            <-Map 29 [SIMPLE_EDGE] vectorized
-                                                              PARTITION_ONLY_SHUFFLE [RS_645]
+                                                            Merge Join Operator [MERGEJOIN_578] (rows=633595212 width=88)
+                                                              Conds:RS_703._col0=RS_652._col0(Inner),Output:["_col1","_col2","_col3"]
+                                                            <-Map 30 [SIMPLE_EDGE] vectorized
+                                                              PARTITION_ONLY_SHUFFLE [RS_652]
                                                                 PartitionCols:_col0
-                                                                Select Operator [SEL_642] (rows=36525 width=1119)
+                                                                Select Operator [SEL_649] (rows=36525 width=1119)
                                                                   Output:["_col0"]
-                                                                  Filter Operator [FIL_641] (rows=36525 width=1119)
+                                                                  Filter Operator [FIL_648] (rows=36525 width=1119)
                                                                     predicate:((d_year) IN (1999, 2000, 2001, 2002) and d_date_sk is not null)
                                                                     TableScan [TS_36] (rows=73049 width=1119)
                                                                       default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
-                                                            <-Map 43 [SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_694]
+                                                            <-Map 44 [SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_703]
                                                                 PartitionCols:_col0
-                                                                Select Operator [SEL_693] (rows=575995635 width=88)
+                                                                Select Operator [SEL_702] (rows=575995635 width=88)
                                                                   Output:["_col0","_col1","_col2","_col3"]
-                                                                  Filter Operator [FIL_692] (rows=575995635 width=88)
+                                                                  Filter Operator [FIL_701] (rows=575995635 width=88)
                                                                     predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_167_date_dim_d_date_sk_min) AND DynamicValue(RS_167_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_167_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
                                                                     TableScan [TS_157] (rows=575995635 width=88)
                                                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_quantity","ss_sales_price"]
-                                                                    <-Reducer 37 [BROADCAST_EDGE] vectorized
-                                                                      BROADCAST [RS_691]
-                                                                        Group By Operator [GBY_690] (rows=1 width=12)
+                                                                    <-Reducer 38 [BROADCAST_EDGE] vectorized
+                                                                      BROADCAST [RS_700]
+                                                                        Group By Operator [GBY_699] (rows=1 width=12)
                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                        <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                          PARTITION_ONLY_SHUFFLE [RS_650]
-                                                                            Group By Operator [GBY_648] (rows=1 width=12)
+                                                                        <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                          PARTITION_ONLY_SHUFFLE [RS_657]
+                                                                            Group By Operator [GBY_655] (rows=1 width=12)
                                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                              Select Operator [SEL_646] (rows=36525 width=1119)
+                                                                              Select Operator [SEL_653] (rows=36525 width=1119)
                                                                                 Output:["_col0"]
-                                                                                 Please refer to the previous Select Operator [SEL_642]
-                            <-Reducer 36 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_709]
-                                Group By Operator [GBY_708] (rows=1 width=224)
+                                                                                 Please refer to the previous Select Operator [SEL_649]
+                            <-Reducer 37 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_718]
+                                Group By Operator [GBY_717] (rows=1 width=224)
                                   Output:["_col0"],aggregations:["max(VALUE._col0)"]
-                                <-Reducer 33 [CUSTOM_SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_701]
-                                    Group By Operator [GBY_699] (rows=1 width=224)
+                                <-Reducer 34 [CUSTOM_SIMPLE_EDGE] vectorized
+                                  PARTITION_ONLY_SHUFFLE [RS_710]
+                                    Group By Operator [GBY_708] (rows=1 width=224)
                                       Output:["_col0"],aggregations:["max(_col1)"]
-                                      Select Operator [SEL_697] (rows=348477374 width=88)
+                                      Select Operator [SEL_706] (rows=348477374 width=88)
                                         Output:["_col1"]
-                                         Please refer to the previous Group By Operator [GBY_695]
+                                         Please refer to the previous Group By Operator [GBY_704]
             <-Reducer 4 [CONTAINS]
-              Reduce Output Operator [RS_589]
-                Group By Operator [GBY_588] (rows=1 width=112)
+              Reduce Output Operator [RS_593]
+                Group By Operator [GBY_592] (rows=1 width=112)
                   Output:["_col0"],aggregations:["sum(_col0)"]
-                  Select Operator [SEL_586] (rows=383314495 width=135)
+                  Select Operator [SEL_590] (rows=383314495 width=135)
                     Output:["_col0"]
-                    Merge Join Operator [MERGEJOIN_585] (rows=383314495 width=135)
+                    Merge Join Operator [MERGEJOIN_589] (rows=383314495 width=135)
                       Conds:RS_120._col1=RS_121._col0(Inner),Output:["_col3","_col4"]
-                    <-Reducer 27 [SIMPLE_EDGE]
+                    <-Reducer 28 [SIMPLE_EDGE]
                       SHUFFLE [RS_121]
                         PartitionCols:_col0
                         Select Operator [SEL_113] (rows=105599202 width=433)
                           Output:["_col0"]
                           Filter Operator [FIL_112] (rows=105599202 width=433)
                             predicate:(_col3 > (0.95 * _col1))
-                            Merge Join Operator [MERGEJOIN_581] (rows=316797606 width=433)
+                            Merge Join Operator [MERGEJOIN_585] (rows=316797606 width=433)
                               Conds:(Inner),(Inner),Output:["_col1","_col2","_col3"]
-                            <-Reducer 40 [CUSTOM_SIMPLE_EDGE] vectorized
+                            <-Reducer 41 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_687]
+                                 Please refer to the previous Group By Operator [GBY_686]
+                            <-Reducer 27 [CUSTOM_SIMPLE_EDGE] vectorized
                               PARTITION_ONLY_SHUFFLE [RS_680]
-                                 Please refer to the previous Group By Operator [GBY_679]
-                            <-Reducer 26 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_673]
-                                Select Operator [SEL_672] (rows=1 width=120)
-                                  Filter Operator [FIL_671] (rows=1 width=120)
+                                Select Operator [SEL_679] (rows=1 width=120)
+                                  Filter Operator [FIL_678] (rows=1 width=120)
                                     predicate:(sq_count_check(_col0) <= 1)
-                                    Group By Operator [GBY_670] (rows=1 width=120)
+                                    Group By Operator [GBY_677] (rows=1 width=120)
                                       Output:["_col0"],aggregations:["count()"]
-                                      Select Operator [SEL_669] (rows=1 width=120)
-                                        Group By Operator [GBY_668] (rows=1 width=120)
+                                      Select Operator [SEL_676] (rows=1 width=120)
+                                        Group By Operator [GBY_675] (rows=1 width=120)
                                           Output:["_col0"],aggregations:["count(VALUE._col0)"]
-                                        <-Reducer 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          PARTITION_ONLY_SHUFFLE [RS_666]
-                                            Group By Operator [GBY_664] (rows=1 width=120)
+                                        <-Reducer 26 [CUSTOM_SIMPLE_EDGE] vectorized
+                                          PARTITION_ONLY_SHUFFLE [RS_673]
+                                            Group By Operator [GBY_671] (rows=1 width=120)
                                               Output:["_col0"],aggregations:["count(_col0)"]
-                                              Select Operator [SEL_662] (rows=348477374 width=88)
+                                              Select Operator [SEL_669] (rows=348477374 width=88)
                                                 Output:["_col0"]
-                                                Group By Operator [GBY_661] (rows=348477374 width=88)
+                                                Group By Operator [GBY_668] (rows=348477374 width=88)
                                                   Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
-                                                <-Reducer 24 [SIMPLE_EDGE]
+                                                <-Reducer 25 [SIMPLE_EDGE]
                                                   SHUFFLE [RS_51]
                                                     PartitionCols:_col0
                                                     Group By Operator [GBY_50] (rows=696954748 width=88)
                                                       Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0
                                                       Select Operator [SEL_48] (rows=696954748 width=88)
                                                         Output:["_col0","_col1"]
-                                                        Merge Join Operator [MERGEJOIN_567] (rows=696954748 width=88)
-                                                          Conds:RS_45._col1=RS_659._col0(Inner),Output:["_col2","_col3","_col6"]
-                                                        <-Map 41 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_659]
+                                                        Merge Join Operator [MERGEJOIN_571] (rows=696954748 width=88)
+                                                          Conds:RS_45._col1=RS_666._col0(Inner),Output:["_col2","_col3","_col6"]
+                                                        <-Map 42 [SIMPLE_EDGE] vectorized
+                                                          SHUFFLE [RS_666]
                                                             PartitionCols:_col0
-                                                             Please refer to the previous Select Operator [SEL_657]
-                                                        <-Reducer 23 [SIMPLE_EDGE]
+                                                             Please refer to the previous Select Operator [SEL_664]
+                                                        <-Reducer 24 [SIMPLE_EDGE]
                                                           SHUFFLE [RS_45]
                                                             PartitionCols:_col1
-                                                            Merge Join Operator [MERGEJOIN_566] (rows=633595212 width=88)
-                                                              Conds:RS_655._col0=RS_643._col0(Inner),Output:["_col1","_col2","_col3"]
-                                                            <-Map 29 [SIMPLE_EDGE] vectorized
-                                                              PARTITION_ONLY_SHUFFLE [RS_643]
+                                                            Merge Join Operator [MERGEJOIN_570] (rows=633595212 width=88)
+                                                              Conds:RS_662._col0=RS_650._col0(Inner),Output:["_col1","_col2","_col3"]
+                                                            <-Map 30 [SIMPLE_EDGE] vectorized
+                                                              PARTITION_ONLY_SHUFFLE [RS_650]
                                                                 PartitionCols:_col0
-                                                                 Please refer to the previous Select Operator [SEL_642]
-                                                            <-Map 22 [SIMPLE_EDGE] vectorized
-                                                              SHUFFLE [RS_655]
+                                                                 Please refer to the previous Select Operator [SEL_649]
+                                                            <-Map 23 [SIMPLE_EDGE] vectorized
+                                                              SHUFFLE [RS_662]
                                                                 PartitionCols:_col0
-                                                                Select Operator [SEL_654] (rows=575995635 width=88)
+                                                                Select Operator [SEL_661] (rows=575995635 width=88)
                                                                   Output:["_col0","_col1","_col2","_col3"]
-                                                                  Filter Operator [FIL_653] (rows=575995635 width=88)
+                                                                  Filter Operator [FIL_660] (rows=575995635 width=88)
                                                                     predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_43_date_dim_d_date_sk_min) AND DynamicValue(RS_43_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_43_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
                                                                     TableScan [TS_33] (rows=575995635 width=88)
                                                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_quantity","ss_sales_price"]
-                                                                    <-Reducer 30 [BROADCAST_EDGE] vectorized
-                                                                      BROADCAST [RS_652]
-                                                                        Group By Operator [GBY_651] (rows=1 width=12)
+                                                                    <-Reducer 31 [BROADCAST_EDGE] vectorized
+                                                                      BROADCAST [RS_659]
+                                                                        Group By Operator [GBY_658] (rows=1 width=12)
                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
-                                                                        <-Map 29 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                          PARTITION_ONLY_SHUFFLE [RS_649]
-                                                                            Group By Operator [GBY_647] (rows=1 width=12)
+                                                                        <-Map 30 [CUSTOM_SIMPLE_EDGE] vectorized
+                                                                          PARTITION_ONLY_SHUFFLE [RS_656]
+                                                                            Group By Operator [GBY_654] (rows=1 width=12)
                                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                              Select Operator [SEL_644] (rows=36525 width=1119)
+                                                                              Select Operator [SEL_651] (rows=36525 width=1119)
                                                                                 Output:["_col0"]
-                                                                                 Please refer to the previous Select Operator [SEL_642]
-                            <-Reducer 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                              PARTITION_ONLY_SHUFFLE [RS_675]
-                                Group By Operator [GBY_674] (rows=1 width=224)
+                                                                                 Please refer to the previous Select Operator [SEL_649]
+                            <-Reducer 29 [CUSTOM_SIMPLE_EDGE] vectorized
+                              PARTITION_ONLY_SHUFFLE [RS_682]
+                                Group By Operator [GBY_681] (rows=1 width=224)
                                   Output:["_col0"],aggregations:["max(VALUE._col0)"]
-                                <-Reducer 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                  PARTITION_ONLY_SHUFFLE [RS_667]
-                                    Group By Operator [GBY_665] (rows=1 width=224)
+                                <-Reducer 26 [CUSTOM_SIMPLE_EDGE] vectorized
+                                  PARTITION_ONLY_SHUFFLE [RS_674]
+                                    Group By Operator [GBY_672] (rows=1 width=224)
                                       Output:["_col0"],aggregations:["max(_col1)"]
-                                      Select Operator [SEL_663] (rows=348477374 width=88)
+                                      Select Operator [SEL_670] (rows=348477374 width=88)
                                         Output:["_col1"]
-                                         Please refer to the previous Group By Operator [GBY_661]
+                                         Please refer to the previous Group By Operator [GBY_668]
                     <-Reducer 3 [SIMPLE_EDGE]
                       SHUFFLE [RS_120]
                         PartitionCols:_col1
-                        Merge Join Operator [MERGEJOIN_579] (rows=348467716 width=135)
-                          Conds:RS_117._col2=RS_631._col0(Inner),Output:["_col1","_col3","_col4"]
+                        Merge Join Operator [MERGEJOIN_583] (rows=348467716 width=135)
+                          Conds:RS_117._col2=RS_638._col0(Inner),Output:["_col1","_col3","_col4"]
                         <-Reducer 16 [SIMPLE_EDGE] vectorized
-                          SHUFFLE [RS_631]
+                          SHUFFLE [RS_638]
                             PartitionCols:_col0
-                             Please refer to the previous Group By Operator [GBY_630]
+                             Please refer to the previous Group By Operator [GBY_637]
                         <-Reducer 2 [SIMPLE_EDGE]
                           SHUFFLE [RS_117]
                             PartitionCols:_col2
-                            Merge Join Operator [MERGEJOIN_563] (rows=316788826 width=135)
-                              Conds:RS_640._col0=RS_597._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
+                            Merge Join Operator [MERGEJOIN_567] (rows=316788826 width=135)
+                              Conds:RS_647._col0=RS_601._col0(Inner),Output:["_col1","_col2","_col3","_col4"]
                             <-Map 7 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_597]
+                              SHUFFLE [RS_601]
                                 PartitionCols:_col0
-                                 Please refer to the previous Select Operator [SEL_596]
+                                 Please refer to the previous Select Operator [SEL_600]
                             <-Map 1 [SIMPLE_EDGE] vectorized
-                              SHUFFLE [RS_640]
+                              SHUFFLE [RS_647]
                                 PartitionCols:_col0
-                                Select Operator [SEL_639] (rows=287989836 width=135)
+                                Select Operator [SEL_646] (rows=287989836 width=135)
                                   Output:["_col0","_col1","_col2","_col3","_col4"]
-                                  Filter Operator [FIL_638] (rows=287989836 width=135)
+                                  Filter Operator [FIL_645] (rows=287989836 width=135)
                                     predicate:((cs_item_sk BETWEEN DynamicValue(RS_118_item_i_item_sk_min) AND DynamicValue(RS_118_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_118_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_115_date_dim_d_date_sk_min) AND DynamicValue(RS_115_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_115_date_dim_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
                                     TableScan [TS_0] (rows=287989836 width=135)
                                       default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_quantity","cs_list_price"]
                                     <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_637]
-                                        Group By Operator [GBY_636] (rows=1 width=20)
+                                      BROADCAST [RS_644]
+                                        Group By Operator [GBY_643] (rows=1 width=20)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=58079560)"]
                                         <-Reducer 16 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_635]
-                                            Group By Operator [GBY_634] (rows=1 width=20)
+                                          SHUFFLE [RS_642]
+                                            Group By Operator [GBY_641] (rows=1 width=20)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=58079560)"]
-                                              Select Operator [SEL_632] (rows=58079562 width=88)
+                                              Select Operator [SEL_639] (rows=58079562 width=88)
                                                 Output:["_col0"]
-                                                 Please refer to the previous Group By Operator [GBY_630]
+                                                 Please refer to the previous Group By Operator [GBY_637]
                                     <-Reducer 8 [BROADCAST_EDGE] vectorized
-                                      BROADCAST [RS_606]
-                                        Group By Operator [GBY_605] (rows=1 width=12)
+                                      BROADCAST [RS_610]
+                                        Group By Operator [GBY_609] (rows=1 width=12)
                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                         <-Map 7 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_603]
-                                            Group By Operator [GBY_601] (rows=1 width=12)
+                                          SHUFFLE [RS_607]
+                                            Group By Operator [GBY_605] (rows=1 width=12)
                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                              Select Operator [SEL_598] (rows=18262 width=1119)
+                                              Select Operator [SEL_602] (rows=18262 width=1119)
                                                 Output:["_col0"]
-                                                 Please refer to the previous Select Operator [SEL_596]
+                                                 Please refer to the previous Select Operator [SEL_600]
 


[35/48] hive git commit: HIVE-19668 : Over 30% of the heap wasted by duplicate org.antlr.runtime.CommonToken's and duplicate strings (Misha Dmitriev reviewed by Aihua Xu and Vihang Karajgaonkar)

Posted by se...@apache.org.
HIVE-19668 : Over 30% of the heap wasted by duplicate org.antlr.runtime.CommonToken's and duplicate strings (Misha Dmitriev reviewed by Aihua Xu and Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/85a3dd7a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/85a3dd7a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/85a3dd7a

Branch: refs/heads/master-txnstats
Commit: 85a3dd7a71e9060b8d2d4cd8a73c19afda748eca
Parents: 34adf31
Author: Misha Dmitriev <mi...@cloudera.com>
Authored: Mon Jul 16 15:59:27 2018 -0700
Committer: Vihang Karajgaonkar <vi...@cloudera.com>
Committed: Mon Jul 16 15:59:27 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/parse/ASTNode.java    |  31 +++++-
 .../hadoop/hive/ql/parse/ASTNodeOrigin.java     |   4 +-
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |  43 ++++++--
 .../hive/ql/parse/ImmutableCommonToken.java     | 107 +++++++++++++++++++
 .../hadoop/hive/ql/parse/ParseDriver.java       |  16 ++-
 .../org/apache/hadoop/hive/ql/parse/QBExpr.java |   5 +-
 .../hadoop/hive/ql/parse/QBParseInfo.java       |   5 +-
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  21 +++-
 .../hadoop/hive/ql/parse/SubQueryUtils.java     |  11 +-
 9 files changed, 212 insertions(+), 31 deletions(-)
----------------------------------------------------------------------
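
The patch attacks the duplication from two directions: value-equal parser tokens are canonicalized through a Guava weak interner (the new TOKEN_CACHE in ASTNode), and repeated strings such as aliases are deduplicated via StringInternUtils. Below is a minimal, self-contained sketch of the token-interning pattern; the FixedToken class is a hypothetical stand-in for ImmutableCommonToken and is not Hive code.

    import com.google.common.collect.Interner;
    import com.google.common.collect.Interners;
    import java.util.Objects;

    // Stand-in for ImmutableCommonToken: a fixed (type, text) pair with
    // value-based equals()/hashCode(), which is what interning requires.
    final class FixedToken {
      final int type;
      final String text;

      FixedToken(int type, String text) { this.type = type; this.text = text; }

      @Override
      public boolean equals(Object o) {
        return o instanceof FixedToken
            && ((FixedToken) o).type == type
            && Objects.equals(((FixedToken) o).text, text);
      }

      @Override
      public int hashCode() { return type * 31 + Objects.hashCode(text); }
    }

    public class TokenInternSketch {
      // Weak interner: the canonical instance can be garbage-collected once
      // nothing outside the cache still references it.
      private static final Interner<FixedToken> CACHE = Interners.newWeakInterner();

      public static void main(String[] args) {
        FixedToken a = CACHE.intern(new FixedToken(1, "TOK_FROM"));
        FixedToken b = CACHE.intern(new FixedToken(1, "TOK_FROM"));
        System.out.println(a == b); // true: duplicates collapse to one instance
      }
    }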


http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
index 9f63f62..7b32020 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNode.java
@@ -24,10 +24,13 @@ import java.util.ArrayList;
 import java.util.Deque;
 import java.util.List;
 
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.CommonTree;
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.ql.lib.Node;
 
 /**
@@ -43,17 +46,16 @@ public class ASTNode extends CommonTree implements Node,Serializable {
   private transient boolean isValidASTStr;
   private transient boolean visited = false;
 
+  private static final Interner<ImmutableCommonToken> TOKEN_CACHE = Interners.newWeakInterner();
+
   public ASTNode() {
   }
 
   /**
-   * Constructor.
-   *
-   * @param t
-   *          Token for the CommonTree Node
+   * @param t Token for the CommonTree Node
    */
   public ASTNode(Token t) {
-    super(t);
+    super(internToken(t));
   }
 
   public ASTNode(ASTNode node) {
@@ -282,6 +284,13 @@ public class ASTNode extends CommonTree implements Node,Serializable {
   }
 
   @Override
+  protected List createChildrenList() {
+    // Measurements show that in most situations the number of children is small.
+    // Avoid wasting memory by creating ArrayList with the default capacity of 10.
+    return new ArrayList(2);
+  }
+
+  @Override
   public String toStringTree() {
 
     // The root might have changed because of tree modifications.
@@ -346,4 +355,16 @@ public class ASTNode extends CommonTree implements Node,Serializable {
 
     return rootNode.getMemoizedSubString(startIndx, endIndx);
   }
+
+  private static Token internToken(Token t) {
+    if (t == null) {
+      return null;
+    }
+    if (t instanceof ImmutableCommonToken) {
+      return TOKEN_CACHE.intern((ImmutableCommonToken) t);
+    } else {
+      t.setText(StringInternUtils.internIfNotNull(t.getText()));
+      return t;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
index 8d812e4..3964c33 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ASTNodeOrigin.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import org.apache.hadoop.hive.common.StringInternUtils;
+
 /**
  * ASTNodeOrigin contains contextual information about the object from whose
  * definition a particular ASTNode originated. For example, suppose a view v is
@@ -46,7 +48,7 @@ public class ASTNodeOrigin {
       String objectDefinition, String usageAlias, ASTNode usageNode) {
     this.objectType = objectType;
     this.objectName = objectName;
-    this.objectDefinition = objectDefinition;
+    this.objectDefinition = StringInternUtils.internIfNotNull(objectDefinition);
     this.usageAlias = usageAlias;
     this.usageNode = usageNode;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index fecfd0c..a0cdcb6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -308,6 +308,27 @@ public class CalcitePlanner extends SemanticAnalyzer {
   private boolean disableSemJoinReordering = true;
   private EnumSet<ExtendedCBOProfile> profilesCBO;
 
+  private static final CommonToken FROM_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_FROM, "TOK_FROM");
+  private static final CommonToken DEST_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_DESTINATION, "TOK_DESTINATION");
+  private static final CommonToken DIR_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_DIR, "TOK_DIR");
+  private static final CommonToken TMPFILE_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_TMP_FILE, "TOK_TMP_FILE");
+  private static final CommonToken SELECT_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_SELECT, "TOK_SELECT");
+  private static final CommonToken SELEXPR_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR");
+  private static final CommonToken TABLEORCOL_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
+  private static final CommonToken INSERT_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_INSERT, "TOK_INSERT");
+  private static final CommonToken QUERY_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_QUERY, "TOK_QUERY");
+  private static final CommonToken SUBQUERY_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY");
+
   public CalcitePlanner(QueryState queryState) throws SemanticException {
     super(queryState);
     if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED)) {
@@ -726,45 +747,45 @@ public class CalcitePlanner extends SemanticAnalyzer {
     //              TOK_TMP_FILE
     //        TOK_SELECT
     //           refs
-    ASTNode from = new ASTNode(new CommonToken(HiveParser.TOK_FROM, "TOK_FROM"));
+    ASTNode from = new ASTNode(FROM_TOKEN);
     from.addChild((ASTNode) ParseDriver.adaptor.dupTree(nodeOfInterest));
-    ASTNode destination = new ASTNode(new CommonToken(HiveParser.TOK_DESTINATION, "TOK_DESTINATION"));
-    ASTNode dir = new ASTNode(new CommonToken(HiveParser.TOK_DIR, "TOK_DIR"));
-    ASTNode tmpFile = new ASTNode(new CommonToken(HiveParser.TOK_TMP_FILE, "TOK_TMP_FILE"));
+    ASTNode destination = new ASTNode(DEST_TOKEN);
+    ASTNode dir = new ASTNode(DIR_TOKEN);
+    ASTNode tmpFile = new ASTNode(TMPFILE_TOKEN);
     dir.addChild(tmpFile);
     destination.addChild(dir);
-    ASTNode select = new ASTNode(new CommonToken(HiveParser.TOK_SELECT, "TOK_SELECT"));
+    ASTNode select = new ASTNode(SELECT_TOKEN);
     int num = 0;
     for (Collection<Object> selectIdentifier : aliasNodes.asMap().values()) {
       Iterator<Object> it = selectIdentifier.iterator();
       ASTNode node = (ASTNode) it.next();
       // Add select expression
-      ASTNode selectExpr = new ASTNode(new CommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"));
+      ASTNode selectExpr = new ASTNode(SELEXPR_TOKEN);
       selectExpr.addChild((ASTNode) ParseDriver.adaptor.dupTree(node)); // Identifier
       String colAlias = "col" + num;
       selectExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias))); // Alias
       select.addChild(selectExpr);
       // Rewrite all INSERT references (all the node values for this key)
-      ASTNode colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
+      ASTNode colExpr = new ASTNode(TABLEORCOL_TOKEN);
       colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
       replaceASTChild(node, colExpr);
       while (it.hasNext()) {
         // Loop to rewrite rest of INSERT references
         node = (ASTNode) it.next();
-        colExpr = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL"));
+        colExpr = new ASTNode(TABLEORCOL_TOKEN);
         colExpr.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, colAlias)));
         replaceASTChild(node, colExpr);
       }
       num++;
     }
-    ASTNode insert = new ASTNode(new CommonToken(HiveParser.TOK_INSERT, "TOK_INSERT"));
+    ASTNode insert = new ASTNode(INSERT_TOKEN);
     insert.addChild(destination);
     insert.addChild(select);
-    ASTNode newQuery = new ASTNode(new CommonToken(HiveParser.TOK_QUERY, "TOK_QUERY"));
+    ASTNode newQuery = new ASTNode(QUERY_TOKEN);
     newQuery.addChild(from);
     newQuery.addChild(insert);
     // 3. create subquery
-    ASTNode subq = new ASTNode(new CommonToken(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY"));
+    ASTNode subq = new ASTNode(SUBQUERY_TOKEN);
     subq.addChild(newQuery);
     subq.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, "subq")));
     replaceASTChild(nodeOfInterest, subq);

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
new file mode 100644
index 0000000..d8264dd
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImmutableCommonToken.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonToken;
+import org.antlr.runtime.Token;
+
+/**
+ * This class is designed to hold "constant" CommonTokens, that have fixed type
+ * and text, and everything else equal to zero. They can therefore be reused
+ * to save memory. However, to support reuse (canonicalization) we need to
+ * implement the proper hashCode() and equals() methods.
+ */
+class ImmutableCommonToken extends CommonToken {
+
+  private static final String SETTERS_DISABLED = "All setter methods are intentionally disabled";
+
+  private final int hashCode;
+
+  ImmutableCommonToken(int type, String text) {
+    super(type, text);
+    hashCode = calculateHash();
+  }
+
+  private int calculateHash() {
+    return type * 31 + (text != null ? text.hashCode() : 0);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof ImmutableCommonToken)) {
+      return false;
+    }
+    ImmutableCommonToken otherToken = (ImmutableCommonToken) other;
+    return type == otherToken.type &&
+        ((text == null && otherToken.text == null) ||
+          text != null && text.equals(otherToken.text));
+  }
+
+  @Override
+  public int hashCode() { return hashCode; }
+
+  // All the setter methods are overridden to throw exception, to prevent accidental
+  // attempts to modify data fields that should be immutable.
+
+  @Override
+  public void setLine(int line) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setText(String text) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setCharPositionInLine(int charPositionInLine) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setChannel(int channel) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setType(int type) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setStartIndex(int start) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setStopIndex(int stop) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setTokenIndex(int index) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+
+  @Override
+  public void setInputStream(CharStream input) {
+    throw new UnsupportedOperationException(SETTERS_DISABLED);
+  }
+}
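
The equals()/hashCode() pair above is what lets a weak interner collapse duplicates: Guava's Interner hands back a previously cached instance only when the new key compares equal to it. Plain org.antlr.runtime.CommonToken does not define value equality (which is why this patch adds the overrides), so interning it directly would deduplicate nothing. A small illustrative contrast, assuming the same ANTLR 3 CommonToken constructor used elsewhere in this patch:

    import com.google.common.collect.Interner;
    import com.google.common.collect.Interners;
    import org.antlr.runtime.CommonToken;

    public class IdentityEqualitySketch {
      public static void main(String[] args) {
        Interner<CommonToken> cache = Interners.newWeakInterner();
        CommonToken a = new CommonToken(5, "x");
        CommonToken b = new CommonToken(5, "x");
        // Without a value-based equals(), the interner sees two distinct
        // keys and keeps both, so no memory is saved.
        System.out.println(cache.intern(a) == cache.intern(b)); // false
      }
    }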

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
index 895c2f2..f707451 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.parse;
 import java.util.ArrayList;
 import org.antlr.runtime.ANTLRStringStream;
 import org.antlr.runtime.CharStream;
+import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.NoViableAltException;
 import org.antlr.runtime.RecognitionException;
 import org.antlr.runtime.Token;
@@ -147,10 +148,19 @@ public class ParseDriver {
     }
 
     @Override
-    public Object dupNode(Object t) {
+    public Token createToken(int tokenType, String text) {
+      if (tokenType == HiveParser.TOK_SETCOLREF) {
+        // ParseUtils.processSetColsNode() can change type of TOK_SETCOLREF nodes later
+        return new CommonToken(tokenType, text);
+      } else {
+        return new ImmutableCommonToken(tokenType, text);
+      }
+    }
 
+    @Override
+    public Object dupNode(Object t) {
       return create(((CommonTree)t).token);
-    };
+    }
 
     @Override
     public Object dupTree(Object t, Object parent) {
@@ -166,7 +176,7 @@ public class ParseDriver {
     @Override
     public Object errorNode(TokenStream input, Token start, Token stop, RecognitionException e) {
       return new ASTErrorNode(input, start, stop, e);
-    };
+    }
   };
 
   public ASTNode parse(String command) throws ParseException {

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java
index f36f7f7..e65f126 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBExpr.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import org.apache.hadoop.hive.common.StringInternUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,11 +50,11 @@ public class QBExpr {
   }
 
   public void setAlias(String alias) {
-    this.alias = alias;
+    this.alias = StringInternUtils.internIfNotNull(alias);
   }
 
   public QBExpr(String alias) {
-    this.alias = alias;
+    setAlias(alias);
   }
 
   public QBExpr(QB qb) {

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
index 5789ee0..ed0da84 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBParseInfo.java
@@ -29,6 +29,7 @@ import java.util.Set;
 import java.util.AbstractMap.SimpleEntry;
 
 import org.antlr.runtime.tree.Tree;
+import org.apache.hadoop.hive.common.StringInternUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
@@ -147,7 +148,7 @@ public class QBParseInfo {
     destToWindowingExprs = new LinkedHashMap<String, LinkedHashMap<String, ASTNode>>();
     destToDistinctFuncExprs = new HashMap<String, List<ASTNode>>();
 
-    this.alias = alias;
+    this.alias = StringInternUtils.internIfNotNull(alias);
     this.isSubQ = isSubQ;
     outerQueryLimit = -1;
 
@@ -478,7 +479,7 @@ public class QBParseInfo {
   }
 
   public void setExprToColumnAlias(ASTNode expr, String alias) {
-    exprToColumnAlias.put(expr,  alias);
+    exprToColumnAlias.put(expr,  StringInternUtils.internIfNotNull(alias));
   }
 
   public void setDestLimit(String dest, Integer offset, Integer limit) {

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 0ca9b58..a8e235e 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
+import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
@@ -396,6 +397,15 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   private String invalidQueryMaterializationReason;
 
+  private static final CommonToken SELECTDI_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_SELECTDI, "TOK_SELECTDI");
+  private static final CommonToken SELEXPR_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR");
+  private static final CommonToken TABLEORCOL_TOKEN =
+      new ImmutableCommonToken(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL");
+  private static final CommonToken DOT_TOKEN =
+      new ImmutableCommonToken(HiveParser.DOT, ".");
+
   static class Phase1Ctx {
     String dest;
     int nextNum;
@@ -14562,7 +14572,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
 
   public static ASTNode genSelectDIAST(RowResolver rr) {
     LinkedHashMap<String, LinkedHashMap<String, ColumnInfo>> map = rr.getRslvMap();
-    ASTNode selectDI = new ASTNode(new CommonToken(HiveParser.TOK_SELECTDI, "TOK_SELECTDI"));
+    ASTNode selectDI = new ASTNode(SELECTDI_TOKEN);
     // Note: this will determine the order of columns in the result. For now, the columns for each
     //       table will be together; the order of the tables, as well as the columns within each
     //       table, is deterministic, but undefined - RR stores them in the order of addition.
@@ -14574,10 +14584,11 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
     return selectDI;
   }
   private static ASTNode buildSelExprSubTree(String tableAlias, String col) {
-    ASTNode selexpr = new ASTNode(new CommonToken(HiveParser.TOK_SELEXPR, "TOK_SELEXPR"));
-    ASTNode tableOrCol = new ASTNode(new CommonToken(HiveParser.TOK_TABLE_OR_COL,
-        "TOK_TABLE_OR_COL"));
-    ASTNode dot = new ASTNode(new CommonToken(HiveParser.DOT, "."));
+    tableAlias = StringInternUtils.internIfNotNull(tableAlias);
+    col = StringInternUtils.internIfNotNull(col);
+    ASTNode selexpr = new ASTNode(SELEXPR_TOKEN);
+    ASTNode tableOrCol = new ASTNode(TABLEORCOL_TOKEN);
+    ASTNode dot = new ASTNode(DOT_TOKEN);
     tableOrCol.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, tableAlias)));
     dot.addChild(tableOrCol);
     dot.addChild(new ASTNode(new CommonToken(HiveParser.Identifier, col)));

http://git-wip-us.apache.org/repos/asf/hive/blob/85a3dd7a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
index e8509ee..3c4e3d5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SubQueryUtils.java
@@ -24,6 +24,7 @@ import java.util.Deque;
 import java.util.List;
 import java.util.Map;
 
+import org.antlr.runtime.CommonToken;
 import org.antlr.runtime.tree.CommonTreeAdaptor;
 import org.apache.hadoop.hive.ql.Context;
 import org.apache.hadoop.hive.ql.ErrorMsg;
@@ -132,11 +133,17 @@ public class SubQueryUtils {
         ASTNode child = (ASTNode) node.getChild(0);
         if (child == subQuery) {
           ASTNode sqOpType = (ASTNode) subQuery.getChild(0).getChild(0);
+          ASTNode newSqOpType;
+          // We create a new ASTNode below because its current token is likely an
+          // ImmutableCommonToken, whose type cannot be modified.
           if (sqOpType.getType() == HiveParser.KW_EXISTS) {
-            sqOpType.getToken().setType(HiveParser.TOK_SUBQUERY_OP_NOTEXISTS);
+            newSqOpType = new ASTNode(new CommonToken(
+                HiveParser.TOK_SUBQUERY_OP_NOTEXISTS, "TOK_SUBQUERY_OP_NOTEXISTS"));
           } else {
-            sqOpType.getToken().setType(HiveParser.TOK_SUBQUERY_OP_NOTIN);
+            newSqOpType = new ASTNode(new CommonToken(
+                HiveParser.TOK_SUBQUERY_OP_NOTIN, "TOK_SUBQUERY_OP_NOTIN"));
           }
+          subQuery.getChild(0).setChild(0, newSqOpType);
           ASTNode parent = getParentInWhereClause(node);
           if (parent == null) {
             root = subQuery;


[32/48] hive git commit: HIVE-20174: Vectorization: Fix NULL / Wrong Results issues in GROUP BY Aggregation Functions (Matt McCline, reviewed by Teddy Choi)

Posted by se...@apache.org.
HIVE-20174: Vectorization: Fix NULL / Wrong Results issues in GROUP BY Aggregation Functions (Matt McCline, reviewed by Teddy Choi)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0966a383
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0966a383
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0966a383

Branch: refs/heads/master-txnstats
Commit: 0966a383d48348c36c270ddbcba2b4516c6f3a24
Parents: 64ceb7b
Author: Matt McCline <mm...@hortonworks.com>
Authored: Mon Jul 16 09:14:44 2018 -0500
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Mon Jul 16 09:14:44 2018 -0500

----------------------------------------------------------------------
 .../UDAFTemplates/VectorUDAFAvg.txt             | 108 +--
 .../UDAFTemplates/VectorUDAFAvgDecimal.txt      |  83 +--
 .../VectorUDAFAvgDecimal64ToDecimal.txt         | 110 ++-
 .../UDAFTemplates/VectorUDAFAvgDecimalMerge.txt |  35 +-
 .../UDAFTemplates/VectorUDAFAvgMerge.txt        |  35 +-
 .../UDAFTemplates/VectorUDAFAvgTimestamp.txt    | 136 +---
 .../UDAFTemplates/VectorUDAFMinMax.txt          |  38 +-
 .../UDAFTemplates/VectorUDAFMinMaxDecimal.txt   |  98 +--
 .../VectorUDAFMinMaxIntervalDayTime.txt         | 102 +--
 .../UDAFTemplates/VectorUDAFMinMaxString.txt    |  50 +-
 .../UDAFTemplates/VectorUDAFMinMaxTimestamp.txt | 104 +--
 .../UDAFTemplates/VectorUDAFSum.txt             |  34 +-
 .../UDAFTemplates/VectorUDAFVar.txt             |   9 +-
 .../UDAFTemplates/VectorUDAFVarDecimal.txt      |  19 +-
 .../UDAFTemplates/VectorUDAFVarMerge.txt        |  60 +-
 .../UDAFTemplates/VectorUDAFVarTimestamp.txt    |  19 +-
 .../ql/exec/vector/VectorAggregationDesc.java   |  19 +-
 .../aggregates/VectorAggregateExpression.java   |   2 +-
 .../aggregates/VectorUDAFCountMerge.java        |  34 +-
 .../aggregates/VectorUDAFSumDecimal.java        |  37 +-
 .../aggregates/VectorUDAFSumDecimal64.java      |  34 +-
 .../VectorUDAFSumDecimal64ToDecimal.java        |  34 +-
 .../aggregates/VectorUDAFSumTimestamp.java      |  34 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |  54 +-
 .../hive/ql/udf/generic/GenericUDAFAverage.java |  21 +
 .../ql/udf/generic/GenericUDAFVariance.java     |  19 +-
 .../exec/vector/TestVectorGroupByOperator.java  |  52 +-
 .../ql/exec/vector/VectorRandomBatchSource.java |  51 +-
 .../ql/exec/vector/VectorRandomRowSource.java   | 253 ++++++-
 .../vector/aggregation/AggregationBase.java     | 473 +++++++++++++
 .../aggregation/TestVectorAggregation.java      | 664 +++++++++++++++++++
 .../expressions/TestVectorDateAddSub.java       |   2 +
 .../vector/expressions/TestVectorDateDiff.java  |   2 +
 .../expressions/TestVectorIfStatement.java      |   4 +
 .../vector/expressions/TestVectorNegative.java  |   5 +
 .../expressions/TestVectorStringConcat.java     |   4 +
 .../expressions/TestVectorStringUnary.java      |   2 +
 .../vector/expressions/TestVectorSubStr.java    |   2 +
 .../expressions/TestVectorTimestampExtract.java |   4 +
 .../ql/optimizer/physical/TestVectorizer.java   |   5 +-
 40 files changed, 1835 insertions(+), 1016 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
index fc3d01f..cf5cc69 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvg.txt
@@ -56,20 +56,9 @@ public class <ClassName> extends VectorAggregateExpression {
       transient private double sum;
       transient private long count;
 
-      /**
-      * Value is explicitly (re)initialized in reset()
-      */
-      transient private boolean isNull = true;
-
       public void avgValue(<ValueType> value) {
-        if (isNull) {
-          sum = value;
-          count = 1;
-          isNull = false;
-        } else {
-          sum += value;
-          count++;
-        }
+        sum += value;
+        count++;
       }
 
       @Override
@@ -79,7 +68,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
       @Override
       public void reset () {
-        isNull = true;
         sum = 0;
         count = 0L;
       }
@@ -151,15 +139,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            vector[0], batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -219,28 +201,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      <ValueType> value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.avgValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,
@@ -321,11 +281,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
       if (inputVector.isRepeating) {
         if (inputVector.noNulls || !inputVector.isNull[0]) {
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.sum = 0;
-            myagg.count = 0;
-          }
           myagg.sum += vector[0]*batchSize;
           myagg.count += batchSize;
         }
@@ -353,14 +308,8 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          <ValueType> value = vector[i];
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.sum = 0;
-            myagg.count = 0;
-          }
-          myagg.sum += value;
-          myagg.count += 1;
+          myagg.sum += vector[i];
+          myagg.count++;
         }
       }
     }
@@ -371,16 +320,9 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum = 0;
-        myagg.count = 0;
-      }
-
       for (int i=0; i< batchSize; ++i) {
-        <ValueType> value = vector[selected[i]];
-        myagg.sum += value;
-        myagg.count += 1;
+        myagg.sum += vector[selected[i]];
+        myagg.count++;
       }
     }
 
@@ -392,13 +334,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          <ValueType> value = vector[i];
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.sum = 0;
-            myagg.count = 0;
-          }
-          myagg.sum += value;
+          myagg.sum += vector[i];
           myagg.count += 1;
         }
       }
@@ -408,15 +344,9 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg,
         <ValueType>[] vector,
         int batchSize) {
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum = 0;
-        myagg.count = 0;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        <ValueType> value = vector[i];
-        myagg.sum += value;
+        myagg.sum += vector[i];
         myagg.count += 1;
       }
     }
@@ -483,15 +413,11 @@ public class <ClassName> extends VectorAggregateExpression {
 #ENDIF COMPLETE
 
     Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
-    Preconditions.checkState(myagg.count > 0);
-    outputColVector.isNull[batchIndex] = false;
 
 #IF PARTIAL1
+    // For AVG, we do not mark NULL if all inputs were NULL.
+    outputColVector.isNull[batchIndex] = false;
+
     ColumnVector[] fields = outputColVector.fields;
     fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
     ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
@@ -506,6 +432,12 @@ public class <ClassName> extends VectorAggregateExpression {
 
 #ENDIF PARTIAL1
 #IF COMPLETE
+    if (myagg.count == 0) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
     outputColVector.vector[batchIndex] = myagg.sum / myagg.count;
 #ENDIF COMPLETE
   }
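
With the isNull flag removed from the Aggregation buffer, whether a group saw any non-null input is now derived from count alone: the PARTIAL1 output always writes the (count, sum) struct, while the COMPLETE output returns NULL for a group whose count is 0. A minimal standalone sketch of that decision (class and method names are illustrative, not part of the template):

// Illustrative restatement of the count-based NULL handling above;
// a Java null stands in for a SQL NULL result.
final class AvgSketch {
  double sum;    // reset to 0 per group
  long count;    // number of non-null inputs seen for the group

  void add(double value) {      // invoked only for non-null inputs
    sum += value;
    count++;
  }

  // COMPLETE mode: a group with no non-null input yields a NULL average.
  Double completeResult() {
    return (count == 0) ? null : Double.valueOf(sum / count);
  }
}
// PARTIAL1 mode, by contrast, always emits the (count, sum) struct, even
// with count == 0; the merge/final stage resolves NULL from the count.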

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
index f512639..3caeecd 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal.txt
@@ -61,23 +61,11 @@ public class <ClassName> extends VectorAggregateExpression {
 
       transient private final HiveDecimalWritable sum = new HiveDecimalWritable();
       transient private long count;
-      transient private boolean isNull;
 
       public void avgValue(HiveDecimalWritable writable) {
-        if (isNull) {
-          // Make a copy since we intend to mutate sum.
-          sum.set(writable);
-          count = 1;
-          isNull = false;
-        } else {
-          // Note that if sum is out of range, mutateAdd will ignore the call.
-          // At the end, sum.isSet() can be checked for null.
-          sum.mutateAdd(writable);
-          count++;
-        }
-      }
 
-      public void avgValueNoNullCheck(HiveDecimalWritable writable) {
+        // Note that if sum is out of range, mutateAdd will ignore the call.
+        // At the end, sum.isSet() can be checked for null.
         sum.mutateAdd(writable);
         count++;
       }
@@ -89,7 +77,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
       @Override
       public void reset() {
-        isNull = true;
         sum.setFromLong(0L);
         count = 0;
       }
@@ -189,15 +176,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            vector[0], batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -257,28 +238,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      HiveDecimalWritable value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.avgValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,
@@ -360,11 +319,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
         if (inputVector.isRepeating) {
           if (inputVector.noNulls || !inputVector.isNull[0]) {
-            if (myagg.isNull) {
-              myagg.isNull = false;
-              myagg.sum.setFromLong(0L);
-              myagg.count = 0;
-            }
             HiveDecimal value = vector[0].getHiveDecimal();
             HiveDecimal multiple = value.multiply(HiveDecimal.create(batchSize));
             myagg.sum.mutateAdd(multiple);
@@ -408,14 +362,8 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum.setFromLong(0L);
-        myagg.count = 0;
-      }
-
       for (int i=0; i< batchSize; ++i) {
-        myagg.avgValueNoNullCheck(vector[selected[i]]);
+        myagg.avgValue(vector[selected[i]]);
       }
     }
 
@@ -436,14 +384,9 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg,
         HiveDecimalWritable[] vector,
         int batchSize) {
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum.setFromLong(0L);
-        myagg.count = 0;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        myagg.avgValueNoNullCheck(vector[i]);
+        myagg.avgValue(vector[i]);
       }
     }
 
@@ -509,12 +452,14 @@ public class <ClassName> extends VectorAggregateExpression {
 #ENDIF COMPLETE
 
     Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull || !myagg.sum.isSet()) {
+
+    // For AVG, we only mark NULL on actual overflow.
+    if (!myagg.sum.isSet()) {
       outputColVector.noNulls = false;
       outputColVector.isNull[batchIndex] = true;
       return;
     }
-    Preconditions.checkState(myagg.count > 0);
+
     outputColVector.isNull[batchIndex] = false;
 
 #IF PARTIAL1
@@ -532,6 +477,12 @@ public class <ClassName> extends VectorAggregateExpression {
 
 #ENDIF PARTIAL1
 #IF COMPLETE
+    // For AVG, we mark NULL on count 0 or on overflow.
+    if (myagg.count == 0 || !myagg.sum.isSet()) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
     tempDecWritable.setFromLong (myagg.count);
     HiveDecimalWritable result = outputColVector.vector[batchIndex];
     result.set(myagg.sum);
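
The decimal variant leans on HiveDecimalWritable overflow semantics: mutateAdd() becomes a no-op once the running sum leaves the representable range, leaving the writable unset, so the output path reports NULL whenever sum.isSet() is false (and, in COMPLETE mode, also when count is 0). A small hedged sketch of that convention, assuming the storage-api HiveDecimalWritable; the class and method names are illustrative:

import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

// Illustrative sketch only, not the template code itself.
final class DecimalAvgSketch {
  private final HiveDecimalWritable sum = new HiveDecimalWritable();
  private long count;

  DecimalAvgSketch() {
    sum.setFromLong(0L);          // start from zero, as reset() does above
  }

  void add(HiveDecimalWritable value) {
    sum.mutateAdd(value);         // silently ignored once sum overflows
    count++;
  }

  boolean overflowed() {
    return !sum.isSet();          // unset writable marks the NULL-on-overflow case
  }
}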

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal64ToDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal64ToDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal64ToDecimal.txt
index 53dceeb..39e0562 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal64ToDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimal64ToDecimal.txt
@@ -81,7 +81,6 @@ public class <ClassName> extends VectorAggregateExpression {
     /**
      * Value is explicitly (re)initialized in reset()
      */
-    private boolean isNull = true;
     private boolean usingRegularDecimal = false;
 
     public Aggregation(int inputScale, HiveDecimalWritable temp) {
@@ -90,26 +89,21 @@ public class <ClassName> extends VectorAggregateExpression {
     }
 
     public void avgValue(long value) {
-      if (isNull) {
-        sum = value;
-        count = 1;
-        isNull = false;
-      } else {
-        if (Math.abs(sum) > nearDecimal64Max) {
-          if (!usingRegularDecimal) {
-            usingRegularDecimal = true;
-            regularDecimalSum.deserialize64(sum, inputScale);
-          } else {
-            temp.deserialize64(sum, inputScale);
-            regularDecimalSum.mutateAdd(temp);
-          }
-          sum = value;
+
+      if (Math.abs(sum) > nearDecimal64Max) {
+        if (!usingRegularDecimal) {
+          usingRegularDecimal = true;
+          regularDecimalSum.deserialize64(sum, inputScale);
         } else {
-          sum += value;
+          temp.deserialize64(sum, inputScale);
+          regularDecimalSum.mutateAdd(temp);
         }
-
-        count++;
+        sum = value;
+      } else {
+        sum += value;
       }
+
+      count++;
     }
 
     @Override
@@ -119,7 +113,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
     @Override
     public void reset () {
-      isNull = true;
       usingRegularDecimal = false;
       sum = 0;
       regularDecimalSum.setFromLong(0);
@@ -202,15 +195,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              vector[0], batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            vector[0], batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -270,28 +257,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      long value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.avgValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,
@@ -502,8 +467,10 @@ public class <ClassName> extends VectorAggregateExpression {
 #ENDIF COMPLETE
 
     Aggregation myagg = (Aggregation) agg;
-    final boolean isNull;
-    if (!myagg.isNull) {
+
+#IF PARTIAL1
+    if (myagg.count > 0) {
+
       if (!myagg.usingRegularDecimal) {
         myagg.regularDecimalSum.deserialize64(myagg.sum, inputScale);
       } else {
@@ -511,19 +478,15 @@ public class <ClassName> extends VectorAggregateExpression {
         myagg.regularDecimalSum.mutateAdd(myagg.temp);
       }
 
-      isNull = !myagg.regularDecimalSum.isSet();
-    } else {
-      isNull = true;
-    }
-    if (isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
+      // For AVG, we only mark NULL on actual overflow.
+      if (!myagg.regularDecimalSum.isSet()) {
+        outputColVector.noNulls = false;
+        outputColVector.isNull[batchIndex] = true;
+        return;
+      }
     }
-    Preconditions.checkState(myagg.count > 0);
-    outputColVector.isNull[batchIndex] = false;
 
-#IF PARTIAL1
+    outputColVector.isNull[batchIndex] = false;
     ColumnVector[] fields = outputColVector.fields;
     fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
     ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
@@ -539,6 +502,27 @@ public class <ClassName> extends VectorAggregateExpression {
 
 #ENDIF PARTIAL1
 #IF COMPLETE
+    final boolean isNull;
+    if (myagg.count > 0) {
+      if (!myagg.usingRegularDecimal) {
+        myagg.regularDecimalSum.deserialize64(myagg.sum, inputScale);
+      } else {
+        myagg.temp.deserialize64(myagg.sum, inputScale);
+        myagg.regularDecimalSum.mutateAdd(myagg.temp);
+      }
+
+      isNull = !myagg.regularDecimalSum.isSet();
+    } else {
+      isNull = true;
+    }
+    if (isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    Preconditions.checkState(myagg.count > 0);
+    outputColVector.isNull[batchIndex] = false;
+
     temp.setFromLong (myagg.count);
     HiveDecimalWritable result = outputColVector.vector[batchIndex];
     result.set(myagg.regularDecimalSum);
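
The decimal64 variant keeps the running sum in a primitive long for speed and only spills into the full-precision regularDecimalSum (via deserialize64 at the input scale) once |sum| approaches the decimal64 maximum. A simplified, hedged restatement of the patched avgValue(), with the threshold and scale treated as caller-supplied values and the class name being illustrative:

import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

// Illustrative sketch of the spill logic shown in the diff above.
final class Decimal64AvgSketch {
  private long sum;                     // fast decimal64 accumulator
  private long count;
  private boolean usingRegularDecimal;  // true once we have spilled
  private final HiveDecimalWritable regularDecimalSum = new HiveDecimalWritable();
  private final HiveDecimalWritable temp = new HiveDecimalWritable();
  private final long nearDecimal64Max;
  private final int inputScale;

  Decimal64AvgSketch(long nearDecimal64Max, int inputScale) {
    this.nearDecimal64Max = nearDecimal64Max;
    this.inputScale = inputScale;
    regularDecimalSum.setFromLong(0L);
  }

  void add(long value) {
    if (Math.abs(sum) > nearDecimal64Max) {
      // Move the long accumulator into the full-precision sum, then
      // restart the fast accumulator from the new value.
      if (!usingRegularDecimal) {
        usingRegularDecimal = true;
        regularDecimalSum.deserialize64(sum, inputScale);
      } else {
        temp.deserialize64(sum, inputScale);
        regularDecimalSum.mutateAdd(temp);
      }
      sum = value;
    } else {
      sum += value;
    }
    count++;
  }
}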

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimalMerge.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimalMerge.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimalMerge.txt
index 5fe9256..3691c05 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimalMerge.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgDecimalMerge.txt
@@ -188,15 +188,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputStructColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], batchSize, batch.selected, inputStructColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], batchSize, inputStructColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            countVector[0], sumVector[0], batchSize, inputStructColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -260,29 +254,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      long count,
-      HiveDecimalWritable sum,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.merge(count, sum);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgMerge.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgMerge.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgMerge.txt
index 162d1ba..2e93efd 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgMerge.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgMerge.txt
@@ -154,15 +154,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputStructColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], batchSize, batch.selected, inputStructColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], batchSize, inputStructColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            countVector[0], sumVector[0], batchSize, inputStructColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -226,29 +220,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      long count,
-      double sum,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.merge(count, sum);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgTimestamp.txt
index 810f31f..358d108 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFAvgTimestamp.txt
@@ -59,20 +59,9 @@ public class <ClassName> extends VectorAggregateExpression {
       transient private double sum;
       transient private long count;
 
-      /**
-      * Value is explicitly (re)initialized in reset()
-      */
-      transient private boolean isNull = true;
-
-      public void sumValue(double value) {
-        if (isNull) {
-          sum = value;
-          count = 1;
-          isNull = false;
-        } else {
-          sum += value;
-          count++;
-        }
+      public void avgValue(double value) {
+        sum += value;
+        count++;
       }
 
       @Override
@@ -82,7 +71,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
       @Override
       public void reset() {
-        isNull = true;
         sum = 0;
         count = 0L;
       }
@@ -153,15 +141,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              inputColVector.getDouble(0), batchSize, batch.selected, inputColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              inputColVector.getDouble(0), batchSize, inputColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            inputColVector.getDouble(0), batchSize, inputColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -187,7 +169,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           bufferIndex,
           i);
-        myagg.sumValue(value);
+        myagg.avgValue(value);
       }
     }
 
@@ -203,7 +185,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           bufferIndex,
           i);
-        myagg.sumValue(
+        myagg.avgValue(
             inputColVector.getDouble(selection[i]));
       }
     }
@@ -218,45 +200,27 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           bufferIndex,
           i);
-        myagg.sumValue(inputColVector.getDouble(i));
+        myagg.avgValue(inputColVector.getDouble(i));
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
+    private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,
       double value,
       int batchSize,
-      int[] selection,
       boolean[] isNull) {
 
-      for (int i=0; i < batchSize; ++i) {
-        if (!isNull[selection[i]]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            bufferIndex,
-            i);
-          myagg.sumValue(value);
-        }
+      if (isNull[0]) {
+        return;
       }
 
-    }
-
-    private void iterateHasNullsRepeatingWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      double value,
-      int batchSize,
-      boolean[] isNull) {
-
       for (int i=0; i < batchSize; ++i) {
-        if (!isNull[i]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            bufferIndex,
-            i);
-          myagg.sumValue(value);
-        }
+        Aggregation myagg = getCurrentAggregationBuffer(
+          aggregationBufferSets,
+          bufferIndex,
+          i);
+        myagg.avgValue(value);
       }
     }
 
@@ -275,7 +239,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             bufferIndex,
             j);
-          myagg.sumValue(inputColVector.getDouble(i));
+          myagg.avgValue(inputColVector.getDouble(i));
         }
       }
    }
@@ -293,7 +257,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             bufferIndex,
             i);
-          myagg.sumValue(inputColVector.getDouble(i));
+          myagg.avgValue(inputColVector.getDouble(i));
         }
       }
    }
@@ -318,11 +282,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
         if (inputColVector.isRepeating) {
           if (inputColVector.noNulls || !inputColVector.isNull[0]) {
-            if (myagg.isNull) {
-              myagg.isNull = false;
-              myagg.sum = 0;
-              myagg.count = 0;
-            }
             myagg.sum += inputColVector.getDouble(0)*batchSize;
             myagg.count += batchSize;
           }
@@ -353,13 +312,7 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          double value = inputColVector.getDouble(i);
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.sum = 0;
-            myagg.count = 0;
-          }
-          myagg.sum += value;
+          myagg.sum += inputColVector.getDouble(i);
           myagg.count += 1;
         }
       }
@@ -371,15 +324,8 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum = 0;
-        myagg.count = 0;
-      }
-
       for (int i=0; i< batchSize; ++i) {
-        double value = inputColVector.getDouble(selected[i]);
-        myagg.sum += value;
+        myagg.sum += inputColVector.getDouble(selected[i]);
         myagg.count += 1;
       }
     }
@@ -392,13 +338,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          double value = inputColVector.getDouble(i);
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.sum = 0;
-            myagg.count = 0;
-          }
-          myagg.sum += value;
+          myagg.sum += inputColVector.getDouble(i);
           myagg.count += 1;
         }
       }
@@ -408,15 +348,9 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg,
         TimestampColumnVector inputColVector,
         int batchSize) {
-      if (myagg.isNull) {
-        myagg.isNull = false;
-        myagg.sum = 0;
-        myagg.count = 0;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        double value = inputColVector.getDouble(i);
-        myagg.sum += value;
+        myagg.sum += inputColVector.getDouble(i);
         myagg.count += 1;
       }
     }
@@ -475,23 +409,14 @@ public class <ClassName> extends VectorAggregateExpression {
   public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
       AggregationBuffer agg) throws HiveException {
 
+    Aggregation myagg = (Aggregation) agg;
+
 #IF PARTIAL1
     StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
-#ENDIF PARTIAL1
-#IF COMPLETE
-    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
-#ENDIF COMPLETE
 
-    Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
-    Preconditions.checkState(myagg.count > 0);
+    // For AVG, we do not mark NULL if all inputs were NULL.
     outputColVector.isNull[batchIndex] = false;
 
-#IF PARTIAL1
     ColumnVector[] fields = outputColVector.fields;
     fields[AVERAGE_COUNT_FIELD_INDEX].isNull[batchIndex] = false;
     ((LongColumnVector) fields[AVERAGE_COUNT_FIELD_INDEX]).vector[batchIndex] = myagg.count;
@@ -506,6 +431,15 @@ public class <ClassName> extends VectorAggregateExpression {
 
 #ENDIF PARTIAL1
 #IF COMPLETE
+    DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
+
+    if (myagg.count == 0) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
     outputColVector.vector[batchIndex] = myagg.sum / myagg.count;
 #ENDIF COMPLETE
   }
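
Some of the removed iterate*RepeatingSelection* variants (including the one above) checked isNull[selection[i]] even though the vector is repeating; the replacements check isNull[0] once, since with isRepeating set element 0 stands for every row and the selection array is irrelevant for the value. A tiny sketch of that rule (names illustrative, not from the template):

// Illustrative helper for the repeating-vector rule relied on by the
// rewritten iterate* methods above.
final class RepeatingAvgSketch {
  double sum;
  long count;

  void addRepeatingBatch(double value0, boolean[] isNull, int batchSize) {
    if (isNull[0]) {
      return;                      // the whole batch is NULL for this column
    }
    sum += value0 * batchSize;     // one shared value contributes per row
    count += batchSize;
  }
}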

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
index 2df45bb..3569d51 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMax.txt
@@ -145,15 +145,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              vector[0], batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregrateIndex,
+            vector[0], batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -213,28 +207,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregrateIndex,
-      <ValueType> value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          aggregrateIndex,
-          i);
-        myagg.minmaxValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
@@ -363,7 +335,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for (int i=0; i< batchSize; ++i) {
         <ValueType> value = vector[selected[i]];
-        myagg.minmaxValueNoCheck(value);
+        myagg.minmaxValue(value);
       }
     }
 
@@ -437,7 +409,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     <InputColumnVectorType> outputColVector = (<InputColumnVectorType>) batch.cols[columnNum];
     Aggregation myagg = (Aggregation) agg;
-       if (myagg.isNull) {
+    if (myagg.isNull) {
       outputColVector.noNulls = false;
       outputColVector.isNull[batchIndex] = true;
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
index 9c8ebcc..eb63301 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxDecimal.txt
@@ -63,7 +63,7 @@ public class <ClassName> extends VectorAggregateExpression {
         value = new HiveDecimalWritable();
       }
 
-      public void checkValue(HiveDecimalWritable writable, short scale) {
+      public void minmaxValue(HiveDecimalWritable writable, short scale) {
         if (isNull) {
           isNull = false;
           this.value.set(writable);
@@ -144,15 +144,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              vector[0], inputVector.scale, batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              vector[0], inputVector.scale, batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregrateIndex,
+            vector[0], inputVector.scale, batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -179,14 +173,14 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(value, scale);
+        myagg.minmaxValue(value, scale);
       }
     }
 
     private void iterateNoNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      HiveDecimalWritable[] values,
+      HiveDecimalWritable[] vector,
       short scale,
       int[] selection,
       int batchSize) {
@@ -196,14 +190,14 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(values[selection[i]], scale);
+        myagg.minmaxValue(vector[selection[i]], scale);
       }
     }
 
     private void iterateNoNullsWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      HiveDecimalWritable[] values,
+      HiveDecimalWritable[] vector,
       short scale,
       int batchSize) {
       for (int i=0; i < batchSize; ++i) {
@@ -211,31 +205,10 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(values[i], scale);
+        myagg.minmaxValue(vector[i], scale);
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregrateIndex,
-      HiveDecimalWritable value,
-      short scale,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      for (int i=0; i < batchSize; ++i) {
-        if (!isNull[selection[i]]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            aggregrateIndex,
-            i);
-          myagg.checkValue(value, scale);
-        }
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
@@ -253,14 +226,14 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(value, scale);
+        myagg.minmaxValue(value, scale);
       }
     }
 
     private void iterateHasNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      HiveDecimalWritable[] values,
+      HiveDecimalWritable[] vector,
       short scale,
       int batchSize,
       int[] selection,
@@ -273,7 +246,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             j);
-          myagg.checkValue(values[i], scale);
+          myagg.minmaxValue(vector[i], scale);
         }
       }
    }
@@ -281,7 +254,7 @@ public class <ClassName> extends VectorAggregateExpression {
     private void iterateHasNullsWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
-      HiveDecimalWritable[] values,
+      HiveDecimalWritable[] vector,
       short scale,
       int batchSize,
       boolean[] isNull) {
@@ -292,7 +265,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             i);
-          myagg.checkValue(values[i], scale);
+          myagg.minmaxValue(vector[i], scale);
         }
       }
    }
@@ -318,10 +291,8 @@ public class <ClassName> extends VectorAggregateExpression {
         HiveDecimalWritable[] vector = inputVector.vector;
 
         if (inputVector.isRepeating) {
-          if ((inputVector.noNulls || !inputVector.isNull[0]) &&
-            (myagg.isNull || (myagg.value.compareTo(vector[0]) <OperatorSymbol> 0))) {
-            myagg.isNull = false;
-            myagg.value.set(vector[0]);
+          if (inputVector.noNulls || !inputVector.isNull[0]) {
+            myagg.minmaxValue(vector[0], inputVector.scale);
           }
           return;
         }
@@ -353,14 +324,7 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          HiveDecimalWritable writable = vector[i];
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            myagg.value.set(writable);
-          }
-          else if (myagg.value.compareTo(writable) <OperatorSymbol> 0) {
-            myagg.value.set(writable);
-          }
+          myagg.minmaxValue(vector[i], scale);
         }
       }
     }
@@ -372,16 +336,8 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        myagg.value.set(vector[selected[0]]);
-        myagg.isNull = false;
-      }
-
       for (int i=0; i< batchSize; ++i) {
-        HiveDecimalWritable writable = vector[selected[i]];
-        if (myagg.value.compareTo(writable) <OperatorSymbol> 0) {
-          myagg.value.set(writable);
-        }
+        myagg.minmaxValue(vector[selected[i]], scale);
       }
     }
 
@@ -394,14 +350,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          HiveDecimalWritable writable = vector[i];
-          if (myagg.isNull) {
-            myagg.value.set(writable);
-            myagg.isNull = false;
-          }
-          else if (myagg.value.compareTo(writable) <OperatorSymbol> 0) {
-            myagg.value.set(writable);
-          }
+          myagg.minmaxValue(vector[i], scale);
         }
       }
     }
@@ -411,16 +360,9 @@ public class <ClassName> extends VectorAggregateExpression {
         HiveDecimalWritable[] vector,
         short scale,
         int batchSize) {
-      if (myagg.isNull) {
-        myagg.value.set(vector[0]);
-        myagg.isNull = false;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        HiveDecimalWritable writable = vector[i];
-        if (myagg.value.compareTo(writable) <OperatorSymbol> 0) {
-          myagg.value.set(writable);
-        }
+        myagg.minmaxValue(vector[i], scale);
       }
     }
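
For min/max the commit renames checkValue() to minmaxValue() and routes every iteration path through it, so the null-flag handling and the compareTo(...) <OperatorSymbol> 0 test live in one place instead of being repeated per branch. A hedged sketch of such a consolidated helper, written for a MAX variant over HiveDecimalWritable (for MAX the operator would be '<'; the real template also threads the column scale through, which is omitted here, and the class name is illustrative):

import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;

// Illustrative MAX aggregation buffer in the spirit of minmaxValue() above.
final class DecimalMaxSketch {
  private final HiveDecimalWritable value = new HiveDecimalWritable();
  private boolean isNull = true;

  void minmaxValue(HiveDecimalWritable writable) {
    if (isNull) {
      isNull = false;
      value.set(writable);          // first non-null value seen
    } else if (value.compareTo(writable) < 0) {
      value.set(writable);          // keep the larger of the two
    }
  }

  boolean resultIsNull() {
    return isNull;                  // no non-null input at all
  }
}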
 

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
index 9a0a6e7..9fdf77c 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxIntervalDayTime.txt
@@ -62,7 +62,7 @@ public class <ClassName> extends VectorAggregateExpression {
         value = new HiveIntervalDayTime();
       }
 
-      public void checkValue(IntervalDayTimeColumnVector colVector, int index) {
+      public void minmaxValue(IntervalDayTimeColumnVector colVector, int index) {
         if (isNull) {
           isNull = false;
           colVector.intervalDayTimeUpdate(this.value, index);
@@ -141,15 +141,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              inputColVector, batchSize, batch.selected, inputColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              inputColVector, batchSize, inputColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregrateIndex,
+            inputColVector, batchSize, inputColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -176,7 +170,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregrateIndex,
           i);
         // Repeating use index 0.
-        myagg.checkValue(inputColVector, 0);
+        myagg.minmaxValue(inputColVector, 0);
       }
     }
 
@@ -192,7 +186,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColVector, selection[i]);
+        myagg.minmaxValue(inputColVector, selection[i]);
       }
     }
 
@@ -206,47 +200,28 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColVector, i);
+        myagg.minmaxValue(inputColVector, i);
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
+    private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
       IntervalDayTimeColumnVector inputColVector,
       int batchSize,
-      int[] selection,
       boolean[] isNull) {
 
-      for (int i=0; i < batchSize; ++i) {
-        if (!isNull[selection[i]]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            aggregrateIndex,
-            i);
-          // Repeating use index 0.
-          myagg.checkValue(inputColVector, 0);
-        }
+      if (isNull[0]) {
+        return;
       }
 
-    }
-
-    private void iterateHasNullsRepeatingWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregrateIndex,
-      IntervalDayTimeColumnVector inputColVector,
-      int batchSize,
-      boolean[] isNull) {
-
       for (int i=0; i < batchSize; ++i) {
-        if (!isNull[i]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            aggregrateIndex,
-            i);
-          // Repeating use index 0.
-          myagg.checkValue(inputColVector, 0);
-        }
+        Aggregation myagg = getCurrentAggregationBuffer(
+          aggregationBufferSets,
+          aggregrateIndex,
+          i);
+        // Repeating use index 0.
+        myagg.minmaxValue(inputColVector, 0);
       }
     }
 
@@ -265,7 +240,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             j);
-          myagg.checkValue(inputColVector, i);
+          myagg.minmaxValue(inputColVector, i);
         }
       }
    }
@@ -283,7 +258,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             i);
-          myagg.checkValue(inputColVector, i);
+          myagg.minmaxValue(inputColVector, i);
         }
       }
    }
@@ -307,10 +282,8 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg = (Aggregation)agg;
 
         if (inputColVector.isRepeating) {
-          if ((inputColVector.noNulls || !inputColVector.isNull[0]) &&
-            (myagg.isNull || (inputColVector.compareTo(myagg.value, 0) <OperatorSymbol> 0))) {
-            myagg.isNull = false;
-            inputColVector.intervalDayTimeUpdate(myagg.value, 0);
+          if (inputColVector.noNulls || !inputColVector.isNull[0]) {
+            myagg.minmaxValue(inputColVector, 0);
           }
           return;
         }
@@ -341,13 +314,7 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            inputColVector.intervalDayTimeUpdate(myagg.value, i);
-          }
-          else if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-            inputColVector.intervalDayTimeUpdate(myagg.value, i);
-          }
+          myagg.minmaxValue(inputColVector, i);
         }
       }
     }
@@ -358,16 +325,9 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        inputColVector.intervalDayTimeUpdate(myagg.value, selected[0]);
-        myagg.isNull = false;
-      }
-
       for (int i=0; i< batchSize; ++i) {
         int sel = selected[i];
-        if (inputColVector.compareTo(myagg.value, sel) <OperatorSymbol> 0) {
-          inputColVector.intervalDayTimeUpdate(myagg.value, sel);
-        }
+        myagg.minmaxValue(inputColVector, sel);
       }
     }
 
@@ -379,13 +339,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          if (myagg.isNull) {
-            inputColVector.intervalDayTimeUpdate(myagg.value, i);
-            myagg.isNull = false;
-          }
-          else if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-            inputColVector.intervalDayTimeUpdate(myagg.value, i);
-          }
+          myagg.minmaxValue(inputColVector, i);
         }
       }
     }
@@ -394,15 +348,9 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg,
         IntervalDayTimeColumnVector inputColVector,
         int batchSize) {
-      if (myagg.isNull) {
-        inputColVector.intervalDayTimeUpdate(myagg.value, 0);
-        myagg.isNull = false;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-          inputColVector.intervalDayTimeUpdate(myagg.value, i);
-        }
+        myagg.minmaxValue(inputColVector, i);
       }
     }
 
@@ -447,7 +395,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     IntervalDayTimeColumnVector outputColVector = (IntervalDayTimeColumnVector) batch.cols[columnNum];
     Aggregation myagg = (Aggregation) agg;
-       if (myagg.isNull) {
+    if (myagg.isNull) {
       outputColVector.noNulls = false;
       outputColVector.isNull[batchIndex] = true;
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
index 4f0b5a5..3387c0d 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxString.txt
@@ -60,7 +60,7 @@ public class <ClassName> extends VectorAggregateExpression {
       */
       transient private boolean isNull = true;
 
-      public void checkValue(byte[] bytes, int start, int length) {
+      public void minmaxValue(byte[] bytes, int start, int length) {
         if (isNull) {
           isNull = false;
           assign(bytes, start, length);
@@ -151,7 +151,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputColumn.isRepeating) {
-          // All nulls, no-op for min/max
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregrateIndex,
+            inputColumn, batchSize, inputColumn.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -180,7 +182,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(bytes, start, length);
+        myagg.minmaxValue(bytes, start, length);
       }
     }
 
@@ -197,7 +199,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColumn.vector[row],
+        myagg.minmaxValue(inputColumn.vector[row],
           inputColumn.start[row],
           inputColumn.length[row]);
       }
@@ -213,12 +215,36 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColumn.vector[i],
+        myagg.minmaxValue(inputColumn.vector[i],
           inputColumn.start[i],
           inputColumn.length[i]);
       }
     }
 
+    private void iterateHasNullsRepeatingWithAggregationSelection(
+      VectorAggregationBufferRow[] aggregationBufferSets,
+      int aggregrateIndex,
+      BytesColumnVector inputColumn,
+      int batchSize,
+      boolean[] isNull) {
+
+      if (isNull[0]) {
+        return;
+      }
+
+      for (int i=0; i < batchSize; ++i) {
+        Aggregation myagg = getCurrentAggregationBuffer(
+          aggregationBufferSets,
+          aggregrateIndex,
+          i);
+        // Repeating use index 0.
+        myagg.minmaxValue(inputColumn.vector[0],
+            inputColumn.start[0],
+            inputColumn.length[0]);
+      }
+
+    }
+
     private void iterateHasNullsSelectionWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
@@ -233,7 +259,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             i);
-          myagg.checkValue(inputColumn.vector[row],
+          myagg.minmaxValue(inputColumn.vector[row],
             inputColumn.start[row],
             inputColumn.length[row]);
         }
@@ -252,7 +278,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             i);
-          myagg.checkValue(inputColumn.vector[i],
+          myagg.minmaxValue(inputColumn.vector[i],
             inputColumn.start[i],
             inputColumn.length[i]);
         }
@@ -279,7 +305,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
         if (inputColumn.isRepeating) {
           if (inputColumn.noNulls || !inputColumn.isNull[0]) {
-            myagg.checkValue(inputColumn.vector[0],
+            myagg.minmaxValue(inputColumn.vector[0],
               inputColumn.start[0],
               inputColumn.length[0]);
           }
@@ -309,7 +335,7 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!inputColumn.isNull[i]) {
-          myagg.checkValue(inputColumn.vector[i],
+          myagg.minmaxValue(inputColumn.vector[i],
             inputColumn.start[i],
             inputColumn.length[i]);
         }
@@ -324,7 +350,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
-        myagg.checkValue(inputColumn.vector[i],
+        myagg.minmaxValue(inputColumn.vector[i],
           inputColumn.start[i],
           inputColumn.length[i]);
       }
@@ -337,7 +363,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for (int i=0; i< batchSize; ++i) {
         if (!inputColumn.isNull[i]) {
-          myagg.checkValue(inputColumn.vector[i],
+          myagg.minmaxValue(inputColumn.vector[i],
             inputColumn.start[i],
             inputColumn.length[i]);
         }
@@ -349,7 +375,7 @@ public class <ClassName> extends VectorAggregateExpression {
         BytesColumnVector inputColumn,
         int batchSize) {
       for (int i=0; i< batchSize; ++i) {
-        myagg.checkValue(inputColumn.vector[i],
+        myagg.minmaxValue(inputColumn.vector[i],
           inputColumn.start[i],
           inputColumn.length[i]);
       }

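The checkValue -> minmaxValue rename above standardizes a single accumulate step across the generated MIN/MAX classes. A minimal, self-contained sketch of that step with simplified types (MinBufferSketch and its byte-wise compare are illustrative, not the generated Hive classes; '<' gives MIN, the generated MAX variant uses '>'):

public final class MinBufferSketch {
  private boolean isNull = true;
  private byte[] value;

  public void minmaxValue(byte[] bytes, int start, int length) {
    byte[] candidate = java.util.Arrays.copyOfRange(bytes, start, start + length);
    if (isNull) {
      isNull = false;            // first non-null input seeds the buffer
      value = candidate;
    } else if (compare(candidate, value) < 0) {
      value = candidate;         // later inputs replace it only when the comparison favors them
    }
  }

  private static int compare(byte[] a, byte[] b) {
    // Unsigned, lexicographic byte comparison, as UTF-8 string MIN/MAX requires.
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }
}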
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
index 5114cda..b8d71d6 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFMinMaxTimestamp.txt
@@ -64,7 +64,7 @@ public class <ClassName> extends VectorAggregateExpression {
         value = new Timestamp(0);
       }
 
-      public void checkValue(TimestampColumnVector colVector, int index) {
+      public void minmaxValue(TimestampColumnVector colVector, int index) {
         if (isNull) {
           isNull = false;
           colVector.timestampUpdate(this.value, index);
@@ -143,15 +143,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              inputColVector, batchSize, batch.selected, inputColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregrateIndex,
-              inputColVector, batchSize, inputColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregrateIndex,
+            inputColVector, batchSize, inputColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -178,7 +172,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregrateIndex,
           i);
         // Repeating use index 0.
-        myagg.checkValue(inputColVector, 0);
+        myagg.minmaxValue(inputColVector, 0);
       }
     }
 
@@ -194,7 +188,7 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColVector, selection[i]);
+        myagg.minmaxValue(inputColVector, selection[i]);
       }
     }
 
@@ -208,47 +202,28 @@ public class <ClassName> extends VectorAggregateExpression {
           aggregationBufferSets,
           aggregrateIndex,
           i);
-        myagg.checkValue(inputColVector, i);
+        myagg.minmaxValue(inputColVector, i);
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
+    private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregrateIndex,
       TimestampColumnVector inputColVector,
       int batchSize,
-      int[] selection,
       boolean[] isNull) {
 
-      for (int i=0; i < batchSize; ++i) {
-        if (!isNull[selection[i]]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            aggregrateIndex,
-            i);
-          // Repeating use index 0.
-          myagg.checkValue(inputColVector, 0);
-        }
+      if (isNull[0]) {
+        return;
       }
 
-    }
-
-    private void iterateHasNullsRepeatingWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregrateIndex,
-      TimestampColumnVector inputColVector,
-      int batchSize,
-      boolean[] isNull) {
-
       for (int i=0; i < batchSize; ++i) {
-        if (!isNull[i]) {
-          Aggregation myagg = getCurrentAggregationBuffer(
-            aggregationBufferSets,
-            aggregrateIndex,
-            i);
-          // Repeating use index 0.
-          myagg.checkValue(inputColVector, 0);
-        }
+        Aggregation myagg = getCurrentAggregationBuffer(
+          aggregationBufferSets,
+          aggregrateIndex,
+          i);
+        // Repeating use index 0.
+        myagg.minmaxValue(inputColVector, 0);
       }
     }
 
@@ -267,7 +242,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             j);
-          myagg.checkValue(inputColVector, i);
+          myagg.minmaxValue(inputColVector, i);
         }
       }
    }
@@ -285,7 +260,7 @@ public class <ClassName> extends VectorAggregateExpression {
             aggregationBufferSets,
             aggregrateIndex,
             i);
-          myagg.checkValue(inputColVector, i);
+          myagg.minmaxValue(inputColVector, i);
         }
       }
    }
@@ -309,10 +284,8 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg = (Aggregation)agg;
 
         if (inputColVector.isRepeating) {
-          if ((inputColVector.noNulls || !inputColVector.isNull[0]) &&
-            (myagg.isNull || (inputColVector.compareTo(myagg.value, 0) <OperatorSymbol> 0))) {
-            myagg.isNull = false;
-            inputColVector.timestampUpdate(myagg.value, 0);
+          if (inputColVector.noNulls || !inputColVector.isNull[0]) {
+            myagg.minmaxValue(inputColVector, 0);
           }
           return;
         }
@@ -343,13 +316,7 @@ public class <ClassName> extends VectorAggregateExpression {
       for (int j=0; j< batchSize; ++j) {
         int i = selected[j];
         if (!isNull[i]) {
-          if (myagg.isNull) {
-            myagg.isNull = false;
-            inputColVector.timestampUpdate(myagg.value, i);
-          }
-          else if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-            inputColVector.timestampUpdate(myagg.value, i);
-          }
+          myagg.minmaxValue(inputColVector, i);
         }
       }
     }
@@ -360,16 +327,9 @@ public class <ClassName> extends VectorAggregateExpression {
         int batchSize,
         int[] selected) {
 
-      if (myagg.isNull) {
-        inputColVector.timestampUpdate(myagg.value, selected[0]);
-        myagg.isNull = false;
-      }
-
-      for (int i=0; i< batchSize; ++i) {
+     for (int i=0; i< batchSize; ++i) {
         int sel = selected[i];
-        if (inputColVector.compareTo(myagg.value, sel) <OperatorSymbol> 0) {
-          inputColVector.timestampUpdate(myagg.value, sel);
-        }
+        myagg.minmaxValue(inputColVector, sel);
       }
     }
 
@@ -381,13 +341,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
       for(int i=0;i<batchSize;++i) {
         if (!isNull[i]) {
-          if (myagg.isNull) {
-            inputColVector.timestampUpdate(myagg.value, i);
-            myagg.isNull = false;
-          }
-          else if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-            inputColVector.timestampUpdate(myagg.value, i);
-          }
+          myagg.minmaxValue(inputColVector, i);
         }
       }
     }
@@ -396,15 +350,9 @@ public class <ClassName> extends VectorAggregateExpression {
         Aggregation myagg,
         TimestampColumnVector inputColVector,
         int batchSize) {
-      if (myagg.isNull) {
-        inputColVector.timestampUpdate(myagg.value, 0);
-        myagg.isNull = false;
-      }
 
       for (int i=0;i<batchSize;++i) {
-        if (inputColVector.compareTo(myagg.value, i) <OperatorSymbol> 0) {
-          inputColVector.timestampUpdate(myagg.value, i);
-        }
+        myagg.minmaxValue(inputColVector, i);
       }
     }
 
@@ -449,7 +397,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     TimestampColumnVector outputColVector = (TimestampColumnVector) batch.cols[columnNum];
     Aggregation myagg = (Aggregation) agg;
-       if (myagg.isNull) {
+    if (myagg.isNull) {
       outputColVector.noNulls = false;
       outputColVector.isNull[batchIndex] = true;
       return;

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
index c731869..548125e 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFSum.txt
@@ -142,15 +142,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              vector[0], batchSize, inputVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregateIndex,
+            vector[0], batchSize, inputVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -210,28 +204,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      <ValueType> value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          aggregateIndex,
-          i);
-        myagg.sumValue(value);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,

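The iterateHasNullsRepeatingSelectionWithAggregationSelection variants removed here (and in the count/decimal sum aggregates further down) were redundant: for a repeating vector, isNull[0] and vector[0] describe every row, so the selection array adds nothing on the input side. A simplified sketch of the surviving path (RepeatingSumSketch and its per-row buffer array are illustrative names, not Hive's):

final class RepeatingSumSketch {
  static final class SumBuffer {
    long sum;
  }

  static void iterateRepeating(SumBuffer[] rowBuffers, long[] vector, boolean[] isNull, int batchSize) {
    if (isNull[0]) {
      return;                        // a repeating all-NULL column contributes nothing
    }
    long value = vector[0];          // identical for every row in the batch
    for (int i = 0; i < batchSize; ++i) {
      rowBuffers[i].sum += value;    // only the output aggregation buffer varies per row
    }
  }
}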
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
index 876ead5..995190f 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVar.txt
@@ -127,7 +127,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     private void init() {
 #IF COMPLETE
-      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      String aggregateName = vecAggrDesc.getAggregationName();
       varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
@@ -490,11 +490,8 @@ public class <ClassName> extends VectorAggregateExpression {
     StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
 
     Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
+
+    // For Variance Family, we do not mark NULL if all inputs were NULL.
     outputColVector.isNull[batchIndex] = false;
 
     ColumnVector[] fields = outputColVector.fields;

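The new comment ("For Variance Family, we do not mark NULL if all inputs were NULL") reflects a split in responsibility: partial stages always emit their (count, sum, variance) struct, and only the COMPLETE/FINAL stage maps "not enough rows" to SQL NULL. A minimal sketch of that final decision, with a plain boolean standing in for Hive's VarianceKind (an assumption made here for brevity):

final class VarianceNullSketch {
  // var_pop/stddev_pop need at least one row; var_samp/stddev_samp need at least two.
  static boolean varianceResultIsNull(long count, boolean isSampleStatistic) {
    return isSampleStatistic ? count <= 1 : count == 0;
  }
}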
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
index cf19b14..a831610 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarDecimal.txt
@@ -138,7 +138,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     private void init() {
 #IF COMPLETE
-      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      String aggregateName = vecAggrDesc.getAggregationName();
       varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
@@ -450,15 +450,12 @@ public class <ClassName> extends VectorAggregateExpression {
   public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
       AggregationBuffer agg) throws HiveException {
 
+    Aggregation myagg = (Aggregation) agg;
+
 #IF PARTIAL1
     StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
 
-    Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
+    // For Variance Family, we do not mark NULL if all inputs were NULL.
     outputColVector.isNull[batchIndex] = false;
 
     ColumnVector[] fields = outputColVector.fields;
@@ -469,7 +466,13 @@ public class <ClassName> extends VectorAggregateExpression {
 #IF COMPLETE
     DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
 
-    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
     if (GenericUDAFVariance.isVarianceNull(myagg.count, varianceKind)) {
 
       // SQL standard - return null for zero (or 1 for sample) elements

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
index ccc5a22..dfc73a1 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarMerge.txt
@@ -60,27 +60,21 @@ public class <ClassName> extends VectorAggregateExpression {
       transient private double mergeSum;
       transient private double mergeVariance;
 
-      /**
-      * Value is explicitly (re)initialized in reset()
-      */
-      transient private boolean isNull = true;
-
       public void merge(long partialCount, double partialSum, double partialVariance) {
 
-        if (isNull || mergeCount == 0) {
+        if (mergeCount == 0) {
 
           // Just copy the information since there is nothing so far.
           mergeCount = partialCount;
           mergeSum = partialSum;
           mergeVariance = partialVariance;
-          isNull = false;
           return;
         }
 
         if (partialCount > 0 && mergeCount > 0) {
 
           // Merge the two partials.
-          mergeVariance +=
+          mergeVariance =
               GenericUDAFVariance.calculateMerge(
                   partialCount, mergeCount, partialSum, mergeSum,
                   partialVariance, mergeVariance);
@@ -98,7 +92,6 @@ public class <ClassName> extends VectorAggregateExpression {
 
       @Override
       public void reset () {
-        isNull = true;
         mergeCount = 0L;
         mergeSum = 0;
         mergeVariance = 0;
@@ -127,7 +120,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     private void init() {
 #IF FINAL
-      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      String aggregateName = vecAggrDesc.getAggregationName();
       varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF FINAL
     }
@@ -183,15 +176,9 @@ public class <ClassName> extends VectorAggregateExpression {
         }
       } else {
         if (inputStructColVector.isRepeating) {
-          if (batch.selectedInUse) {
-            iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], varianceVector[0], batchSize, batch.selected, inputStructColVector.isNull);
-          } else {
-            iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, bufferIndex,
-              countVector[0], sumVector[0], varianceVector[0], batchSize, inputStructColVector.isNull);
-          }
+          iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, bufferIndex,
+            countVector[0], sumVector[0], varianceVector[0], batchSize, inputStructColVector.isNull);
         } else {
           if (batch.selectedInUse) {
             iterateHasNullsSelectionWithAggregationSelection(
@@ -258,30 +245,6 @@ public class <ClassName> extends VectorAggregateExpression {
       }
     }
 
-    private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int bufferIndex,
-      long count,
-      double sum,
-      double variance,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-      if (isNull[0]) {
-        return;
-      }
-
-      for (int i=0; i < batchSize; ++i) {
-        Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          bufferIndex,
-          i);
-        myagg.merge(count, sum, variance);
-      }
-
-    }
-
     private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int bufferIndex,
@@ -488,8 +451,6 @@ public class <ClassName> extends VectorAggregateExpression {
 #ENDIF FINAL
      */
 
-/*
-    There seems to be a Wrong Results bug in VectorUDAFVarFinal -- disabling vectorization for now...
     return
         GenericUDAFVariance.isVarianceFamilyName(name) &&
         inputColVectorType == ColumnVector.Type.STRUCT &&
@@ -501,8 +462,6 @@ public class <ClassName> extends VectorAggregateExpression {
         outputColVectorType == ColumnVector.Type.DOUBLE &&
         mode == Mode.FINAL;
 #ENDIF FINAL
-*/
-    return false;
   }
 
   @Override
@@ -513,11 +472,8 @@ public class <ClassName> extends VectorAggregateExpression {
     StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
 
     Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
+
+    // For Variance Family, we do not mark NULL if all inputs were NULL.
     outputColVector.isNull[batchIndex] = false;
 
     ColumnVector[] fields = outputColVector.fields;

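Besides dropping the redundant repeating-with-selection path and the per-buffer isNull flag, this hunk changes "mergeVariance +=" to "mergeVariance =", i.e. the result of GenericUDAFVariance.calculateMerge is taken as the fully combined statistic rather than an increment, so accumulating it would double-count the existing partial. For reference, a sketch of the textbook pairwise merge of two (count, sum, sum-of-squared-deviations) partials (Chan/Golub/LeVeque); this states the standard formula, not necessarily the exact expression inside calculateMerge:

final class VarianceMergeSketch {
  /** Returns the combined sum of squared deviations for two disjoint partitions. */
  static double merge(long countA, double sumA, double varA,
                      long countB, double sumB, double varB) {
    if (countA == 0) {
      return varB;
    }
    if (countB == 0) {
      return varA;
    }
    double delta = sumA / countA - sumB / countB;
    return varA + varB + delta * delta * ((double) countA * countB) / (countA + countB);
  }
}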
http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
----------------------------------------------------------------------
diff --git a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
index 1dd5ab4..4e79f22 100644
--- a/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
+++ b/ql/src/gen/vectorization/UDAFTemplates/VectorUDAFVarTimestamp.txt
@@ -128,7 +128,7 @@ public class <ClassName> extends VectorAggregateExpression {
 
     private void init() {
 #IF COMPLETE
-      String aggregateName = vecAggrDesc.getAggrDesc().getGenericUDAFName();
+      String aggregateName = vecAggrDesc.getAggregationName();
       varianceKind = VarianceKind.nameMap.get(aggregateName);
 #ENDIF COMPLETE
     }
@@ -422,15 +422,12 @@ public class <ClassName> extends VectorAggregateExpression {
   public void assignRowColumn(VectorizedRowBatch batch, int batchIndex, int columnNum,
       AggregationBuffer agg) throws HiveException {
 
+   Aggregation myagg = (Aggregation) agg;
+
 #IF PARTIAL1
     StructColumnVector outputColVector = (StructColumnVector) batch.cols[columnNum];
 
-    Aggregation myagg = (Aggregation) agg;
-    if (myagg.isNull) {
-      outputColVector.noNulls = false;
-      outputColVector.isNull[batchIndex] = true;
-      return;
-    }
+    // For Variance Family, we do not mark NULL if all inputs were NULL.
     outputColVector.isNull[batchIndex] = false;
 
     ColumnVector[] fields = outputColVector.fields;
@@ -441,7 +438,13 @@ public class <ClassName> extends VectorAggregateExpression {
 #IF COMPLETE
     DoubleColumnVector outputColVector = (DoubleColumnVector) batch.cols[columnNum];
 
-    Aggregation myagg = (Aggregation) agg;
+    if (myagg.isNull) {
+      outputColVector.noNulls = false;
+      outputColVector.isNull[batchIndex] = true;
+      return;
+    }
+    outputColVector.isNull[batchIndex] = false;
+
     if (GenericUDAFVariance.isVarianceNull(myagg.count, varianceKind)) {
 
       // SQL standard - return null for zero (or 1 for sample) elements

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
index 5736399..417beec 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAggregationDesc.java
@@ -86,7 +86,7 @@ public class VectorAggregationDesc implements java.io.Serializable {
 
   private static final long serialVersionUID = 1L;
 
-  private final AggregationDesc aggrDesc;
+  private final String aggregationName;
 
   private final TypeInfo inputTypeInfo;
   private final ColumnVector.Type inputColVectorType;
@@ -99,15 +99,19 @@ public class VectorAggregationDesc implements java.io.Serializable {
   private final Class<? extends VectorAggregateExpression> vecAggrClass;
 
   private GenericUDAFEvaluator evaluator;
+  private GenericUDAFEvaluator.Mode udafEvaluatorMode;
 
-  public VectorAggregationDesc(AggregationDesc aggrDesc, GenericUDAFEvaluator evaluator,
+  public VectorAggregationDesc(String aggregationName, GenericUDAFEvaluator evaluator,
+      GenericUDAFEvaluator.Mode udafEvaluatorMode,
       TypeInfo inputTypeInfo, ColumnVector.Type inputColVectorType,
       VectorExpression inputExpression, TypeInfo outputTypeInfo,
       ColumnVector.Type outputColVectorType,
       Class<? extends VectorAggregateExpression> vecAggrClass) {
 
-    this.aggrDesc = aggrDesc;
+    this.aggregationName = aggregationName;
+
     this.evaluator = evaluator;
+    this.udafEvaluatorMode = udafEvaluatorMode;
 
     this.inputTypeInfo = inputTypeInfo;
     this.inputColVectorType = inputColVectorType;
@@ -122,8 +126,12 @@ public class VectorAggregationDesc implements java.io.Serializable {
     this.vecAggrClass = vecAggrClass;
   }
 
-  public AggregationDesc getAggrDesc() {
-    return aggrDesc;
+  public String getAggregationName() {
+    return aggregationName;
+  }
+
+  public GenericUDAFEvaluator.Mode getUdafEvaluatorMode() {
+    return udafEvaluatorMode;
   }
 
   public TypeInfo getInputTypeInfo() {
@@ -174,7 +182,6 @@ public class VectorAggregationDesc implements java.io.Serializable {
       sb.append("/");
       sb.append(outputDataTypePhysicalVariation);
     }
-    String aggregationName = aggrDesc.getGenericUDAFName();
     if (GenericUDAFVariance.isVarianceFamilyName(aggregationName)) {
       sb.append(" aggregation: ");
       sb.append(aggregationName);

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
index 3224557..2499f09 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorAggregateExpression.java
@@ -84,7 +84,7 @@ public abstract class VectorAggregateExpression  implements Serializable {
     outputTypeInfo =  vecAggrDesc.getOutputTypeInfo();
     outputDataTypePhysicalVariation = vecAggrDesc.getOutputDataTypePhysicalVariation();
 
-    mode = vecAggrDesc.getAggrDesc().getMode();
+    mode = vecAggrDesc.getUdafEvaluatorMode();
   }
 
   public VectorExpression getInputExpression() {

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
index 0463de5..bd781af 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFCountMerge.java
@@ -117,15 +117,9 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
       }
     } else {
       if (inputVector.isRepeating) {
-        if (batch.selectedInUse) {
-          iterateHasNullsRepeatingSelectionWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              vector[0], batchSize, batch.selected, inputVector.isNull);
-        } else {
-          iterateHasNullsRepeatingWithAggregationSelection(
-              aggregationBufferSets, aggregateIndex,
-              vector[0], batchSize, inputVector.isNull);
-        }
+        iterateHasNullsRepeatingWithAggregationSelection(
+            aggregationBufferSets, aggregateIndex,
+            vector[0], batchSize, inputVector.isNull);
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(
@@ -185,28 +179,6 @@ public class VectorUDAFCountMerge extends VectorAggregateExpression {
     }
   }
 
-  private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-      VectorAggregationBufferRow[] aggregationBufferSets,
-      int aggregateIndex,
-      long value,
-      int batchSize,
-      int[] selection,
-      boolean[] isNull) {
-
-    if (isNull[0]) {
-      return;
-    }
-
-    for (int i=0; i < batchSize; ++i) {
-      Aggregation myagg = getCurrentAggregationBuffer(
-          aggregationBufferSets,
-          aggregateIndex,
-          i);
-      myagg.value += value;
-    }
-    
-  }
-
   private void iterateHasNullsRepeatingWithAggregationSelection(
       VectorAggregationBufferRow[] aggregationBufferSets,
       int aggregateIndex,

http://git-wip-us.apache.org/repos/asf/hive/blob/0966a383/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
index 315b72b..469f610 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/aggregates/VectorUDAFSumDecimal.java
@@ -139,17 +139,10 @@ public class VectorUDAFSumDecimal extends VectorAggregateExpression {
       }
     } else {
       if (inputVector.isRepeating) {
-        if (batch.selectedInUse) {
-          iterateHasNullsRepeatingSelectionWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0],
-            batchSize, batch.selected, inputVector.isNull);
-        } else {
-          iterateHasNullsRepeatingWithAggregationSelection(
-            aggregationBufferSets, aggregateIndex,
-            vector[0],
-            batchSize, inputVector.isNull);
-        }
+        iterateHasNullsRepeatingWithAggregationSelection(
+          aggregationBufferSets, aggregateIndex,
+          vector[0],
+          batchSize, inputVector.isNull);
       } else {
         if (batch.selectedInUse) {
           iterateHasNullsSelectionWithAggregationSelection(
@@ -211,28 +204,6 @@ public class VectorUDAFSumDecimal extends VectorAggregateExpression {
     }
   }
 
-  private void iterateHasNullsRepeatingSelectionWithAggregationSelection(
-    VectorAggregationBufferRow[] aggregationBufferSets,
-    int aggregateIndex,
-    HiveDecimalWritable value,
-    int batchSize,
-    int[] selection,
-    boolean[] isNull) {
-
-    if (isNull[0]) {
-      return;
-    }
-
-    for (int i=0; i < batchSize; ++i) {
-      Aggregation myagg = getCurrentAggregationBuffer(
-        aggregationBufferSets,
-        aggregateIndex,
-        i);
-      myagg.sumValue(value);
-    }
-
-  }
-
   private void iterateHasNullsRepeatingWithAggregationSelection(
     VectorAggregationBufferRow[] aggregationBufferSets,
     int aggregateIndex,


[29/48] hive git commit: HIVE-19992: Vectorization: Follow-on to HIVE-19951 --> add call to SchemaEvolution.isOnlyImplicitConversion to disable encoded LLAP I/O for ORC only when data type conversion is not implicit (Matt McCline, reviewed by Prasanth Jayachandran)

Posted by se...@apache.org.
HIVE-19992: Vectorization: Follow-on to HIVE-19951 --> add call to SchemaEvolution.isOnlyImplicitConversion to disable encoded LLAP I/O for ORC only when data type conversion is not implicit (Matt McCline, reviewed by Prasanth Jayachandran)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/64ceb7ba
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/64ceb7ba
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/64ceb7ba

Branch: refs/heads/master-txnstats
Commit: 64ceb7baece29da0babe927385fdd954796ccca6
Parents: 1b5903b
Author: Matt McCline <mm...@hortonworks.com>
Authored: Sun Jul 15 23:07:13 2018 -0500
Committer: Matt McCline <mm...@hortonworks.com>
Committed: Sun Jul 15 23:07:13 2018 -0500

----------------------------------------------------------------------
 .../hive/llap/io/api/impl/LlapRecordReader.java | 60 +-------------------
 .../llap/orc_ppd_schema_evol_3a.q.out           | 52 ++++-------------
 2 files changed, 13 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/64ceb7ba/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
----------------------------------------------------------------------
diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
index be748e9..3455d16 100644
--- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
+++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java
@@ -289,68 +289,10 @@ class LlapRecordReader
     executor.submit(rp.getReadCallable());
   }
 
-  private boolean hasSchemaEvolutionStringFamilyTruncateIssue(SchemaEvolution evolution) {
-    return hasStringFamilyTruncateTypeIssue(evolution, evolution.getReaderSchema());
-  }
-
-  // We recurse through the types.
-  private boolean hasStringFamilyTruncateTypeIssue(SchemaEvolution evolution,
-      TypeDescription readerType) {
-    TypeDescription fileType = evolution.getFileType(readerType);
-    if (fileType == null) {
-      return false;
-    }
-    switch (fileType.getCategory()) {
-    case BOOLEAN:
-    case BYTE:
-    case SHORT:
-    case INT:
-    case LONG:
-    case DOUBLE:
-    case FLOAT:
-    case STRING:
-    case TIMESTAMP:
-    case BINARY:
-    case DATE:
-    case DECIMAL:
-      // We are only looking for the CHAR/VARCHAR truncate issue.
-      return false;
-    case CHAR:
-    case VARCHAR:
-      if (readerType.getCategory().equals(TypeDescription.Category.CHAR) ||
-          readerType.getCategory().equals(TypeDescription.Category.VARCHAR)) {
-        return (fileType.getMaxLength() > readerType.getMaxLength());
-      }
-      return false;
-    case UNION:
-    case MAP:
-    case LIST:
-    case STRUCT:
-      {
-        List<TypeDescription> readerChildren = readerType.getChildren();
-        final int childCount = readerChildren.size();
-        for (int i = 0; i < childCount; ++i) {
-          if (hasStringFamilyTruncateTypeIssue(evolution, readerChildren.get(i))) {
-            return true;
-          }
-        }
-      }
-      return false;
-    default:
-      throw new IllegalArgumentException("Unknown type " + fileType);
-    }
-  }
-
   private boolean checkOrcSchemaEvolution() {
     SchemaEvolution evolution = rp.getSchemaEvolution();
 
-    /*
-     * FUTURE: When SchemaEvolution.isOnlyImplicitConversion becomes available:
-     *  1) Replace the hasSchemaEvolutionStringFamilyTruncateIssue call with
-     *     !isOnlyImplicitConversion.
-     *  2) Delete hasSchemaEvolutionStringFamilyTruncateIssue code.
-     */
-    if (evolution.hasConversion() && hasSchemaEvolutionStringFamilyTruncateIssue(evolution)) {
+    if (evolution.hasConversion() && !evolution.isOnlyImplicitConversion()) {
 
       // We do not support data type conversion when reading encoded ORC data.
       return false;

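This hunk swaps the local CHAR/VARCHAR-truncation walk for ORC's own SchemaEvolution.isOnlyImplicitConversion, which rejects every lossy conversion rather than only string truncation. Restated as a sketch (the package name below is assumed to be the ORC impl package the class already imports): encoded LLAP I/O stays enabled only when all reader/file schema differences are implicit, lossless conversions, e.g. smallint -> int is fine while varchar(20) -> varchar(10) is not. The broader guard is consistent with the orc_ppd_schema_evol_3a.q.out counter changes below, where the LLAP IO decode counters disappear for the affected queries:

final class LlapEncodedIoGuardSketch {
  static boolean encodedLlapIoAllowed(org.apache.orc.impl.SchemaEvolution evolution) {
    // Keep encoded LLAP I/O only when there is no conversion, or every conversion is implicit.
    return !evolution.hasConversion() || evolution.isOnlyImplicitConversion();
  }
}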
http://git-wip-us.apache.org/repos/asf/hive/blob/64ceb7ba/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
index 45586be..e197126 100644
--- a/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
+++ b/ql/src/test/results/clientpositive/llap/orc_ppd_schema_evol_3a.q.out
@@ -1635,9 +1635,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n3
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 0
+   HDFS_BYTES_READ: 20860
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1655,13 +1655,7 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_OPERATOR_SEL_2: 6
    RECORDS_OUT_OPERATOR_TS_0: 2100
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 4346
-   CACHE_MISS_BYTES: 0
-   METADATA_CACHE_HIT: 2
-   NUM_DECODED_BATCHES: 3
-   NUM_VECTOR_BATCHES: 3
-   ROWS_EMITTED: 2100
-   SELECTED_ROWGROUPS: 3
+   METADATA_CACHE_HIT: 1
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    INPUT_DIRECTORIES_Map_1: 1
@@ -1673,9 +1667,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n3
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 0
+   HDFS_BYTES_READ: 22586
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 3
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1693,13 +1687,7 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_OPERATOR_SEL_2: 6
    RECORDS_OUT_OPERATOR_TS_0: 2100
 Stage-1 LLAP IO COUNTERS:
-   CACHE_HIT_BYTES: 5935
-   CACHE_MISS_BYTES: 0
-   METADATA_CACHE_HIT: 2
-   NUM_DECODED_BATCHES: 3
-   NUM_VECTOR_BATCHES: 3
-   ROWS_EMITTED: 2100
-   SELECTED_ROWGROUPS: 3
+   METADATA_CACHE_HIT: 1
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    INPUT_DIRECTORIES_Map_1: 1
@@ -1863,9 +1851,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n3
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 2062
+   HDFS_BYTES_READ: 18628
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1883,15 +1871,7 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_OPERATOR_SEL_2: 4
    RECORDS_OUT_OPERATOR_TS_0: 2100
 Stage-1 LLAP IO COUNTERS:
-   ALLOCATED_BYTES: 786432
-   ALLOCATED_USED_BYTES: 4264
-   CACHE_HIT_BYTES: 24
-   CACHE_MISS_BYTES: 2062
-   METADATA_CACHE_HIT: 2
-   NUM_DECODED_BATCHES: 3
-   NUM_VECTOR_BATCHES: 3
-   ROWS_EMITTED: 2100
-   SELECTED_ROWGROUPS: 3
+   METADATA_CACHE_HIT: 1
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    INPUT_DIRECTORIES_Map_1: 1
@@ -1935,9 +1915,9 @@ PREHOOK: type: QUERY
 PREHOOK: Input: default@orc_ppd_n3
 PREHOOK: Output: hdfs://### HDFS PATH ###
 Stage-1 FILE SYSTEM COUNTERS:
-   HDFS_BYTES_READ: 1215
+   HDFS_BYTES_READ: 19952
    HDFS_BYTES_WRITTEN: 101
-   HDFS_READ_OPS: 4
+   HDFS_READ_OPS: 5
    HDFS_LARGE_READ_OPS: 0
    HDFS_WRITE_OPS: 2
 Stage-1 HIVE COUNTERS:
@@ -1955,15 +1935,7 @@ Stage-1 HIVE COUNTERS:
    RECORDS_OUT_OPERATOR_SEL_2: 4
    RECORDS_OUT_OPERATOR_TS_0: 1000
 Stage-1 LLAP IO COUNTERS:
-   ALLOCATED_BYTES: 262144
-   ALLOCATED_USED_BYTES: 2376
-   CACHE_HIT_BYTES: 2086
-   CACHE_MISS_BYTES: 1215
-   METADATA_CACHE_HIT: 2
-   NUM_DECODED_BATCHES: 1
-   NUM_VECTOR_BATCHES: 1
-   ROWS_EMITTED: 1000
-   SELECTED_ROWGROUPS: 1
+   METADATA_CACHE_HIT: 1
 Stage-1 INPUT COUNTERS:
    GROUPED_INPUT_SPLITS_Map_1: 1
    INPUT_DIRECTORIES_Map_1: 1


[04/48] hive git commit: HIVE-19886 : Logs may be directed to 2 files if --hiveconf hive.log.file is used (Jaume Marhuenda via Zoltan Haindrich)

Posted by se...@apache.org.
HIVE-19886 : Logs may be directed to 2 files if --hiveconf hive.log.file is used (Jaume Marhuenda via Zoltan Haindrich)

Signed-off-by: Ashutosh Chauhan <ha...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bf54424d
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bf54424d
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bf54424d

Branch: refs/heads/master-txnstats
Commit: bf54424d22b6b117595f09b1bbaf8024de0ce906
Parents: 3b88d6c
Author: Jaume Marhuenda <ja...@gmail.com>
Authored: Fri Jul 13 21:55:00 2018 -0700
Committer: Ashutosh Chauhan <ha...@apache.org>
Committed: Fri Jul 13 21:55:00 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hive/service/server/HiveServer2.java  | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bf54424d/service/src/java/org/apache/hive/service/server/HiveServer2.java
----------------------------------------------------------------------
diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java
index 6184fdc..2471883 100644
--- a/service/src/java/org/apache/hive/service/server/HiveServer2.java
+++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hive.common.JvmPauseMonitor;
 import org.apache.hadoop.hive.common.LogUtils;
 import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;
 import org.apache.hadoop.hive.common.ServerUtils;
-import org.apache.hadoop.hive.common.cli.CommonCliOptions;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -1209,11 +1208,17 @@ public class HiveServer2 extends CompositeService {
         for (String propKey : confProps.stringPropertyNames()) {
           // save logging message for log4j output latter after log4j initialize properly
           debugMessage.append("Setting " + propKey + "=" + confProps.getProperty(propKey) + ";\n");
-          if (propKey.equalsIgnoreCase("hive.root.logger")) {
-            CommonCliOptions.splitAndSetLogger(propKey, confProps);
-          } else {
-            System.setProperty(propKey, confProps.getProperty(propKey));
+          if ("hive.log.file".equals(propKey) ||
+              "hive.log.dir".equals(propKey) ||
+              "hive.root.logger".equals(propKey)) {
+            throw new IllegalArgumentException("Logs will be split into two "
+                + "files if the command-line argument " + propKey + " is "
+                + "used. To prevent this, pass it through HADOOP_CLIENT_OPTS as -D"
+                + propKey + "=" + confProps.getProperty(propKey)
+                + " or set the value in the configuration file"
+                + " (see HIVE-19886)");
           }
+          System.setProperty(propKey, confProps.getProperty(propKey));
         }
 
         // Process --help


[07/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query77.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query77.q.out b/ql/src/test/results/clientpositive/perf/tez/query77.q.out
index 163805b..915d4fd 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query77.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query77.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join MERGEJOIN[307][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 16' is a cross product
+Warning: Shuffle Join MERGEJOIN[315][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 16' is a cross product
 PREHOOK: query: explain
 with ss as
  (select s_store_sk,
@@ -249,296 +249,296 @@ Stage-0
     limit:100
     Stage-1
       Reducer 8 vectorized
-      File Output Operator [FS_360]
-        Limit [LIM_359] (rows=100 width=163)
+      File Output Operator [FS_368]
+        Limit [LIM_367] (rows=100 width=163)
           Number of rows:100
-          Select Operator [SEL_358] (rows=956329968 width=163)
+          Select Operator [SEL_366] (rows=956329968 width=163)
             Output:["_col0","_col1","_col2","_col3","_col4"]
           <-Reducer 7 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_357]
-              Select Operator [SEL_356] (rows=956329968 width=163)
+            SHUFFLE [RS_365]
+              Select Operator [SEL_364] (rows=956329968 width=163)
                 Output:["_col0","_col1","_col2","_col3","_col4"]
-                Group By Operator [GBY_355] (rows=956329968 width=163)
+                Group By Operator [GBY_363] (rows=956329968 width=163)
                   Output:["_col0","_col1","_col3","_col4","_col5"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1, KEY._col2
                 <-Union 6 [SIMPLE_EDGE]
                   <-Reducer 16 [CONTAINS]
-                    Reduce Output Operator [RS_311]
+                    Reduce Output Operator [RS_319]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_310] (rows=1912659936 width=163)
+                      Group By Operator [GBY_318] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_308] (rows=158394413 width=360)
+                        Select Operator [SEL_316] (rows=158394413 width=360)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_307] (rows=158394413 width=360)
+                          Merge Join Operator [MERGEJOIN_315] (rows=158394413 width=360)
                             Conds:(Inner),Output:["_col0","_col1","_col2","_col3","_col4"]
                           <-Reducer 15 [CUSTOM_SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_367]
-                              Group By Operator [GBY_366] (rows=158394413 width=135)
+                            PARTITION_ONLY_SHUFFLE [RS_375]
+                              Group By Operator [GBY_374] (rows=158394413 width=135)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                               <-Reducer 14 [SIMPLE_EDGE]
                                 SHUFFLE [RS_55]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_54] (rows=316788826 width=135)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col1
-                                    Merge Join Operator [MERGEJOIN_293] (rows=316788826 width=135)
-                                      Conds:RS_365._col0=RS_322._col0(Inner),Output:["_col1","_col2","_col3"]
+                                    Merge Join Operator [MERGEJOIN_301] (rows=316788826 width=135)
+                                      Conds:RS_373._col0=RS_330._col0(Inner),Output:["_col1","_col2","_col3"]
                                     <-Map 9 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_322]
+                                      SHUFFLE [RS_330]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_318] (rows=8116 width=1119)
+                                        Select Operator [SEL_326] (rows=8116 width=1119)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_317] (rows=8116 width=1119)
+                                          Filter Operator [FIL_325] (rows=8116 width=1119)
                                             predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-08-04 00:00:00' AND TIMESTAMP'1998-09-03 00:00:00' and d_date_sk is not null)
                                             TableScan [TS_3] (rows=73049 width=1119)
                                               default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                                     <-Map 31 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_365]
+                                      SHUFFLE [RS_373]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_364] (rows=287989836 width=135)
+                                        Select Operator [SEL_372] (rows=287989836 width=135)
                                           Output:["_col0","_col1","_col2","_col3"]
-                                          Filter Operator [FIL_363] (rows=287989836 width=135)
+                                          Filter Operator [FIL_371] (rows=287989836 width=135)
                                             predicate:((cs_sold_date_sk BETWEEN DynamicValue(RS_51_date_dim_d_date_sk_min) AND DynamicValue(RS_51_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_51_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
                                             TableScan [TS_44] (rows=287989836 width=135)
                                               default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_call_center_sk","cs_ext_sales_price","cs_net_profit"]
                                             <-Reducer 17 [BROADCAST_EDGE] vectorized
-                                              BROADCAST [RS_362]
-                                                Group By Operator [GBY_361] (rows=1 width=12)
+                                              BROADCAST [RS_370]
+                                                Group By Operator [GBY_369] (rows=1 width=12)
                                                   Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                 <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  SHUFFLE [RS_332]
-                                                    Group By Operator [GBY_329] (rows=1 width=12)
+                                                  SHUFFLE [RS_340]
+                                                    Group By Operator [GBY_337] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                      Select Operator [SEL_323] (rows=8116 width=1119)
+                                                      Select Operator [SEL_331] (rows=8116 width=1119)
                                                         Output:["_col0"]
-                                                         Please refer to the previous Select Operator [SEL_318]
+                                                         Please refer to the previous Select Operator [SEL_326]
                           <-Reducer 19 [CUSTOM_SIMPLE_EDGE] vectorized
-                            PARTITION_ONLY_SHUFFLE [RS_372]
-                              Group By Operator [GBY_371] (rows=1 width=224)
+                            PARTITION_ONLY_SHUFFLE [RS_380]
+                              Group By Operator [GBY_379] (rows=1 width=224)
                                 Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"]
                               <-Reducer 18 [CUSTOM_SIMPLE_EDGE]
                                 PARTITION_ONLY_SHUFFLE [RS_69]
                                   Group By Operator [GBY_68] (rows=1 width=224)
                                     Output:["_col0","_col1"],aggregations:["sum(_col1)","sum(_col2)"]
-                                    Merge Join Operator [MERGEJOIN_294] (rows=31678769 width=106)
-                                      Conds:RS_370._col0=RS_324._col0(Inner),Output:["_col1","_col2"]
+                                    Merge Join Operator [MERGEJOIN_302] (rows=31678769 width=106)
+                                      Conds:RS_378._col0=RS_332._col0(Inner),Output:["_col1","_col2"]
                                     <-Map 9 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_324]
+                                      SHUFFLE [RS_332]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_318]
+                                         Please refer to the previous Select Operator [SEL_326]
                                     <-Map 32 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_370]
+                                      SHUFFLE [RS_378]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_369] (rows=28798881 width=106)
+                                        Select Operator [SEL_377] (rows=28798881 width=106)
                                           Output:["_col0","_col1","_col2"]
-                                          Filter Operator [FIL_368] (rows=28798881 width=106)
+                                          Filter Operator [FIL_376] (rows=28798881 width=106)
                                             predicate:cr_returned_date_sk is not null
                                             TableScan [TS_58] (rows=28798881 width=106)
                                               default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_returned_date_sk","cr_return_amount","cr_net_loss"]
                   <-Reducer 23 [CONTAINS]
-                    Reduce Output Operator [RS_316]
+                    Reduce Output Operator [RS_324]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_315] (rows=1912659936 width=163)
+                      Group By Operator [GBY_323] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_313] (rows=95833780 width=135)
+                        Select Operator [SEL_321] (rows=95833780 width=135)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_312] (rows=95833780 width=135)
-                            Conds:RS_388._col0=RS_393._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_320] (rows=95833780 width=135)
+                            Conds:RS_396._col0=RS_401._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
                           <-Reducer 22 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_388]
+                            FORWARD [RS_396]
                               PartitionCols:_col0
-                              Group By Operator [GBY_387] (rows=87121617 width=135)
+                              Group By Operator [GBY_395] (rows=87121617 width=135)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                               <-Reducer 21 [SIMPLE_EDGE]
                                 SHUFFLE [RS_94]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_93] (rows=174243235 width=135)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_296] (rows=174243235 width=135)
-                                      Conds:RS_89._col1=RS_377._col0(Inner),Output:["_col2","_col3","_col6"]
+                                    Merge Join Operator [MERGEJOIN_304] (rows=174243235 width=135)
+                                      Conds:RS_89._col1=RS_385._col0(Inner),Output:["_col2","_col3","_col6"]
                                     <-Map 34 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_377]
+                                      PARTITION_ONLY_SHUFFLE [RS_385]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_376] (rows=4602 width=585)
+                                        Select Operator [SEL_384] (rows=4602 width=585)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_375] (rows=4602 width=585)
+                                          Filter Operator [FIL_383] (rows=4602 width=585)
                                             predicate:wp_web_page_sk is not null
                                             TableScan [TS_83] (rows=4602 width=585)
                                               default@web_page,web_page,Tbl:COMPLETE,Col:NONE,Output:["wp_web_page_sk"]
                                     <-Reducer 20 [SIMPLE_EDGE]
                                       SHUFFLE [RS_89]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_295] (rows=158402938 width=135)
-                                          Conds:RS_386._col0=RS_325._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        Merge Join Operator [MERGEJOIN_303] (rows=158402938 width=135)
+                                          Conds:RS_394._col0=RS_333._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_325]
+                                          SHUFFLE [RS_333]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_318]
+                                             Please refer to the previous Select Operator [SEL_326]
                                         <-Map 33 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_386]
+                                          SHUFFLE [RS_394]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_385] (rows=144002668 width=135)
+                                            Select Operator [SEL_393] (rows=144002668 width=135)
                                               Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_384] (rows=144002668 width=135)
+                                              Filter Operator [FIL_392] (rows=144002668 width=135)
                                                 predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_87_date_dim_d_date_sk_min) AND DynamicValue(RS_87_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_87_date_dim_d_date_sk_bloom_filter))) and (ws_web_page_sk BETWEEN DynamicValue(RS_90_web_page_wp_web_page_sk_min) AND DynamicValue(RS_90_web_page_wp_web_page_sk_max) and in_bloom_filter(ws_web_page_sk, DynamicValue(RS_90_web_page_wp_web_page_sk_bloom_filter))) and ws_sold_date_sk is not null and ws_web_page_sk is not null)
                                                 TableScan [TS_77] (rows=144002668 width=135)
                                                   default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_web_page_sk","ws_ext_sales_price","ws_net_profit"]
                                                 <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_374]
-                                                    Group By Operator [GBY_373] (rows=1 width=12)
+                                                  BROADCAST [RS_382]
+                                                    Group By Operator [GBY_381] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_333]
-                                                        Group By Operator [GBY_330] (rows=1 width=12)
+                                                      SHUFFLE [RS_341]
+                                                        Group By Operator [GBY_338] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_326] (rows=8116 width=1119)
+                                                          Select Operator [SEL_334] (rows=8116 width=1119)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_318]
+                                                             Please refer to the previous Select Operator [SEL_326]
                                                 <-Reducer 35 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_383]
-                                                    Group By Operator [GBY_382] (rows=1 width=12)
+                                                  BROADCAST [RS_391]
+                                                    Group By Operator [GBY_390] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      PARTITION_ONLY_SHUFFLE [RS_381]
-                                                        Group By Operator [GBY_380] (rows=1 width=12)
+                                                      PARTITION_ONLY_SHUFFLE [RS_389]
+                                                        Group By Operator [GBY_388] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_378] (rows=4602 width=585)
+                                                          Select Operator [SEL_386] (rows=4602 width=585)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_376]
+                                                             Please refer to the previous Select Operator [SEL_384]
                           <-Reducer 27 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_393]
+                            FORWARD [RS_401]
                               PartitionCols:_col0
-                              Group By Operator [GBY_392] (rows=8711072 width=92)
+                              Group By Operator [GBY_400] (rows=8711072 width=92)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                               <-Reducer 26 [SIMPLE_EDGE]
                                 SHUFFLE [RS_114]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_113] (rows=17422145 width=92)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_298] (rows=17422145 width=92)
-                                      Conds:RS_109._col1=RS_379._col0(Inner),Output:["_col2","_col3","_col6"]
+                                    Merge Join Operator [MERGEJOIN_306] (rows=17422145 width=92)
+                                      Conds:RS_109._col1=RS_387._col0(Inner),Output:["_col2","_col3","_col6"]
                                     <-Map 34 [SIMPLE_EDGE] vectorized
-                                      PARTITION_ONLY_SHUFFLE [RS_379]
+                                      PARTITION_ONLY_SHUFFLE [RS_387]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_376]
+                                         Please refer to the previous Select Operator [SEL_384]
                                     <-Reducer 25 [SIMPLE_EDGE]
                                       SHUFFLE [RS_109]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_297] (rows=15838314 width=92)
-                                          Conds:RS_391._col0=RS_327._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        Merge Join Operator [MERGEJOIN_305] (rows=15838314 width=92)
+                                          Conds:RS_399._col0=RS_335._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_327]
+                                          SHUFFLE [RS_335]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_318]
+                                             Please refer to the previous Select Operator [SEL_326]
                                         <-Map 36 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_391]
+                                          SHUFFLE [RS_399]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_390] (rows=14398467 width=92)
+                                            Select Operator [SEL_398] (rows=14398467 width=92)
                                               Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_389] (rows=14398467 width=92)
+                                              Filter Operator [FIL_397] (rows=14398467 width=92)
                                                 predicate:(wr_returned_date_sk is not null and wr_web_page_sk is not null)
                                                 TableScan [TS_97] (rows=14398467 width=92)
                                                   default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_returned_date_sk","wr_web_page_sk","wr_return_amt","wr_net_loss"]
                   <-Reducer 5 [CONTAINS]
-                    Reduce Output Operator [RS_306]
+                    Reduce Output Operator [RS_314]
                       PartitionCols:_col0, _col1, _col2
-                      Group By Operator [GBY_305] (rows=1912659936 width=163)
+                      Group By Operator [GBY_313] (rows=1912659936 width=163)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5"],aggregations:["sum(_col2)","sum(_col3)","sum(_col4)"],keys:_col0, _col1, 0L
-                        Select Operator [SEL_303] (rows=383325119 width=88)
+                        Select Operator [SEL_311] (rows=383325119 width=88)
                           Output:["_col0","_col1","_col2","_col3","_col4"]
-                          Merge Join Operator [MERGEJOIN_302] (rows=383325119 width=88)
-                            Conds:RS_349._col0=RS_354._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
+                          Merge Join Operator [MERGEJOIN_310] (rows=383325119 width=88)
+                            Conds:RS_357._col0=RS_362._col0(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5"]
                           <-Reducer 13 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_354]
+                            FORWARD [RS_362]
                               PartitionCols:_col0
-                              Group By Operator [GBY_353] (rows=34842647 width=77)
+                              Group By Operator [GBY_361] (rows=34842647 width=77)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                               <-Reducer 12 [SIMPLE_EDGE]
                                 SHUFFLE [RS_37]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_36] (rows=69685294 width=77)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_292] (rows=69685294 width=77)
-                                      Conds:RS_32._col1=RS_340._col0(Inner),Output:["_col2","_col3","_col6"]
+                                    Merge Join Operator [MERGEJOIN_300] (rows=69685294 width=77)
+                                      Conds:RS_32._col1=RS_348._col0(Inner),Output:["_col2","_col3","_col6"]
                                     <-Map 28 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_340]
+                                      SHUFFLE [RS_348]
                                         PartitionCols:_col0
-                                        Select Operator [SEL_337] (rows=1704 width=1910)
+                                        Select Operator [SEL_345] (rows=1704 width=1910)
                                           Output:["_col0"]
-                                          Filter Operator [FIL_336] (rows=1704 width=1910)
+                                          Filter Operator [FIL_344] (rows=1704 width=1910)
                                             predicate:s_store_sk is not null
                                             TableScan [TS_6] (rows=1704 width=1910)
                                               default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_store_sk"]
                                     <-Reducer 11 [SIMPLE_EDGE]
                                       SHUFFLE [RS_32]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_291] (rows=63350266 width=77)
-                                          Conds:RS_352._col0=RS_321._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        Merge Join Operator [MERGEJOIN_299] (rows=63350266 width=77)
+                                          Conds:RS_360._col0=RS_329._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_321]
+                                          SHUFFLE [RS_329]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_318]
+                                             Please refer to the previous Select Operator [SEL_326]
                                         <-Map 30 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_352]
+                                          SHUFFLE [RS_360]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_351] (rows=57591150 width=77)
+                                            Select Operator [SEL_359] (rows=57591150 width=77)
                                               Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_350] (rows=57591150 width=77)
+                                              Filter Operator [FIL_358] (rows=57591150 width=77)
                                                 predicate:(sr_returned_date_sk is not null and sr_store_sk is not null)
                                                 TableScan [TS_20] (rows=57591150 width=77)
                                                   default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_returned_date_sk","sr_store_sk","sr_return_amt","sr_net_loss"]
                           <-Reducer 4 [ONE_TO_ONE_EDGE] vectorized
-                            FORWARD [RS_349]
+                            FORWARD [RS_357]
                               PartitionCols:_col0
-                              Group By Operator [GBY_348] (rows=348477374 width=88)
+                              Group By Operator [GBY_356] (rows=348477374 width=88)
                                 Output:["_col0","_col1","_col2"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0
                               <-Reducer 3 [SIMPLE_EDGE]
                                 SHUFFLE [RS_17]
                                   PartitionCols:_col0
                                   Group By Operator [GBY_16] (rows=696954748 width=88)
                                     Output:["_col0","_col1","_col2"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col6
-                                    Merge Join Operator [MERGEJOIN_290] (rows=696954748 width=88)
-                                      Conds:RS_12._col1=RS_338._col0(Inner),Output:["_col2","_col3","_col6"]
+                                    Merge Join Operator [MERGEJOIN_298] (rows=696954748 width=88)
+                                      Conds:RS_12._col1=RS_346._col0(Inner),Output:["_col2","_col3","_col6"]
                                     <-Map 28 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_338]
+                                      SHUFFLE [RS_346]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_337]
+                                         Please refer to the previous Select Operator [SEL_345]
                                     <-Reducer 2 [SIMPLE_EDGE]
                                       SHUFFLE [RS_12]
                                         PartitionCols:_col1
-                                        Merge Join Operator [MERGEJOIN_289] (rows=633595212 width=88)
-                                          Conds:RS_347._col0=RS_319._col0(Inner),Output:["_col1","_col2","_col3"]
+                                        Merge Join Operator [MERGEJOIN_297] (rows=633595212 width=88)
+                                          Conds:RS_355._col0=RS_327._col0(Inner),Output:["_col1","_col2","_col3"]
                                         <-Map 9 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_319]
+                                          SHUFFLE [RS_327]
                                             PartitionCols:_col0
-                                             Please refer to the previous Select Operator [SEL_318]
+                                             Please refer to the previous Select Operator [SEL_326]
                                         <-Map 1 [SIMPLE_EDGE] vectorized
-                                          SHUFFLE [RS_347]
+                                          SHUFFLE [RS_355]
                                             PartitionCols:_col0
-                                            Select Operator [SEL_346] (rows=575995635 width=88)
+                                            Select Operator [SEL_354] (rows=575995635 width=88)
                                               Output:["_col0","_col1","_col2","_col3"]
-                                              Filter Operator [FIL_345] (rows=575995635 width=88)
+                                              Filter Operator [FIL_353] (rows=575995635 width=88)
                                                 predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_10_date_dim_d_date_sk_min) AND DynamicValue(RS_10_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_10_date_dim_d_date_sk_bloom_filter))) and (ss_store_sk BETWEEN DynamicValue(RS_13_store_s_store_sk_min) AND DynamicValue(RS_13_store_s_store_sk_max) and in_bloom_filter(ss_store_sk, DynamicValue(RS_13_store_s_store_sk_bloom_filter))) and ss_sold_date_sk is not null and ss_store_sk is not null)
                                                 TableScan [TS_0] (rows=575995635 width=88)
                                                   default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_store_sk","ss_ext_sales_price","ss_net_profit"]
                                                 <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_335]
-                                                    Group By Operator [GBY_334] (rows=1 width=12)
+                                                  BROADCAST [RS_343]
+                                                    Group By Operator [GBY_342] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_331]
-                                                        Group By Operator [GBY_328] (rows=1 width=12)
+                                                      SHUFFLE [RS_339]
+                                                        Group By Operator [GBY_336] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_320] (rows=8116 width=1119)
+                                                          Select Operator [SEL_328] (rows=8116 width=1119)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_318]
+                                                             Please refer to the previous Select Operator [SEL_326]
                                                 <-Reducer 29 [BROADCAST_EDGE] vectorized
-                                                  BROADCAST [RS_344]
-                                                    Group By Operator [GBY_343] (rows=1 width=12)
+                                                  BROADCAST [RS_352]
+                                                    Group By Operator [GBY_351] (rows=1 width=12)
                                                       Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                     <-Map 28 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                      SHUFFLE [RS_342]
-                                                        Group By Operator [GBY_341] (rows=1 width=12)
+                                                      SHUFFLE [RS_350]
+                                                        Group By Operator [GBY_349] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                          Select Operator [SEL_339] (rows=1704 width=1910)
+                                                          Select Operator [SEL_347] (rows=1704 width=1910)
                                                             Output:["_col0"]
-                                                             Please refer to the previous Select Operator [SEL_337]
+                                                             Please refer to the previous Select Operator [SEL_345]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query78.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query78.q.out b/ql/src/test/results/clientpositive/perf/tez/query78.q.out
index 90b6f17..b110260 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query78.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query78.q.out
@@ -139,10 +139,10 @@ Stage-0
     limit:100
     Stage-1
       Reducer 6 vectorized
-      File Output Operator [FS_235]
-        Limit [LIM_234] (rows=100 width=88)
+      File Output Operator [FS_238]
+        Limit [LIM_237] (rows=100 width=88)
           Number of rows:100
-          Select Operator [SEL_233] (rows=23425424 width=88)
+          Select Operator [SEL_236] (rows=23425424 width=88)
             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
           <-Reducer 5 [SIMPLE_EDGE]
             SHUFFLE [RS_73]
@@ -150,28 +150,28 @@ Stage-0
                 Output:["_col0","_col1","_col6","_col7","_col8","_col9","_col10","_col11","_col12"]
                 Filter Operator [FIL_71] (rows=23425424 width=88)
                   predicate:(COALESCE(_col11,0) > 0)
-                  Merge Join Operator [MERGEJOIN_188] (rows=70276272 width=88)
-                    Conds:RS_68._col1=RS_232._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9","_col11","_col12","_col13"]
+                  Merge Join Operator [MERGEJOIN_191] (rows=70276272 width=88)
+                    Conds:RS_68._col1=RS_235._col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9","_col11","_col12","_col13"]
                   <-Reducer 12 [SIMPLE_EDGE] vectorized
-                    SHUFFLE [RS_232]
+                    SHUFFLE [RS_235]
                       PartitionCols:_col0
-                      Select Operator [SEL_231] (rows=43558464 width=135)
+                      Select Operator [SEL_234] (rows=43558464 width=135)
                         Output:["_col0","_col1","_col2","_col3"]
-                        Group By Operator [GBY_230] (rows=43558464 width=135)
+                        Group By Operator [GBY_233] (rows=43558464 width=135)
                           Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1
                         <-Reducer 11 [SIMPLE_EDGE]
                           SHUFFLE [RS_65]
                             PartitionCols:_col0, _col1
                             Group By Operator [GBY_64] (rows=87116928 width=135)
                               Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col3, _col4
-                              Merge Join Operator [MERGEJOIN_186] (rows=87116928 width=135)
-                                Conds:RS_195._col0=RS_61._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
+                              Merge Join Operator [MERGEJOIN_189] (rows=87116928 width=135)
+                                Conds:RS_198._col0=RS_61._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
                               <-Map 1 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_195]
+                                SHUFFLE [RS_198]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_190] (rows=36524 width=1119)
+                                  Select Operator [SEL_193] (rows=36524 width=1119)
                                     Output:["_col0"]
-                                    Filter Operator [FIL_189] (rows=36524 width=1119)
+                                    Filter Operator [FIL_192] (rows=36524 width=1119)
                                       predicate:((d_year = 2000) and d_date_sk is not null)
                                       TableScan [TS_0] (rows=73049 width=1119)
                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year"]
@@ -182,32 +182,32 @@ Stage-0
                                     Output:["_col0","_col1","_col2","_col4","_col5","_col6"]
                                     Filter Operator [FIL_58] (rows=79197206 width=135)
                                       predicate:_col8 is null
-                                      Merge Join Operator [MERGEJOIN_185] (rows=158394413 width=135)
-                                        Conds:RS_227._col2, _col3=RS_229._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
+                                      Merge Join Operator [MERGEJOIN_188] (rows=158394413 width=135)
+                                        Conds:RS_230._col2, _col3=RS_232._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
                                       <-Map 20 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_227]
+                                        SHUFFLE [RS_230]
                                           PartitionCols:_col2, _col3
-                                          Select Operator [SEL_226] (rows=143994918 width=135)
+                                          Select Operator [SEL_229] (rows=143994918 width=135)
                                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                            Filter Operator [FIL_225] (rows=143994918 width=135)
+                                            Filter Operator [FIL_228] (rows=143994918 width=135)
                                               predicate:((cs_item_sk = cs_item_sk) and (cs_sold_date_sk BETWEEN DynamicValue(RS_60_date_dim_d_date_sk_min) AND DynamicValue(RS_60_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_60_date_dim_d_date_sk_bloom_filter))) and cs_sold_date_sk is not null)
                                               TableScan [TS_50] (rows=287989836 width=135)
                                                 default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk","cs_order_number","cs_quantity","cs_wholesale_cost","cs_sales_price"]
                                               <-Reducer 13 [BROADCAST_EDGE] vectorized
-                                                BROADCAST [RS_224]
-                                                  Group By Operator [GBY_223] (rows=1 width=12)
+                                                BROADCAST [RS_227]
+                                                  Group By Operator [GBY_226] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                   <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                    SHUFFLE [RS_202]
-                                                      Group By Operator [GBY_199] (rows=1 width=12)
+                                                    SHUFFLE [RS_205]
+                                                      Group By Operator [GBY_202] (rows=1 width=12)
                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                        Select Operator [SEL_196] (rows=36524 width=1119)
+                                                        Select Operator [SEL_199] (rows=36524 width=1119)
                                                           Output:["_col0"]
-                                                           Please refer to the previous Select Operator [SEL_190]
+                                                           Please refer to the previous Select Operator [SEL_193]
                                       <-Map 22 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_229]
+                                        SHUFFLE [RS_232]
                                           PartitionCols:_col0, _col1
-                                          Select Operator [SEL_228] (rows=28798881 width=106)
+                                          Select Operator [SEL_231] (rows=28798881 width=106)
                                             Output:["_col0","_col1"]
                                             TableScan [TS_53] (rows=28798881 width=106)
                                               default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number"]
@@ -216,26 +216,26 @@ Stage-0
                       PartitionCols:_col1
                       Filter Operator [FIL_45] (rows=63887519 width=88)
                         predicate:(COALESCE(_col7,0) > 0)
-                        Merge Join Operator [MERGEJOIN_187] (rows=191662559 width=88)
-                          Conds:RS_212._col1, _col0=RS_222._col1, _col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9"]
+                        Merge Join Operator [MERGEJOIN_190] (rows=191662559 width=88)
+                          Conds:RS_215._col1, _col0=RS_225._col1, _col0(Left Outer),Output:["_col0","_col1","_col2","_col3","_col4","_col7","_col8","_col9"]
                         <-Reducer 3 [ONE_TO_ONE_EDGE] vectorized
-                          FORWARD [RS_212]
+                          FORWARD [RS_215]
                             PartitionCols:_col1, _col0
-                            Select Operator [SEL_211] (rows=174238687 width=88)
+                            Select Operator [SEL_214] (rows=174238687 width=88)
                               Output:["_col0","_col1","_col2","_col3","_col4"]
-                              Group By Operator [GBY_210] (rows=174238687 width=88)
+                              Group By Operator [GBY_213] (rows=174238687 width=88)
                                 Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_18]
                                   PartitionCols:_col0, _col1
                                   Group By Operator [GBY_17] (rows=348477374 width=88)
                                     Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col4, _col3
-                                    Merge Join Operator [MERGEJOIN_182] (rows=348477374 width=88)
-                                      Conds:RS_191._col0=RS_14._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
+                                    Merge Join Operator [MERGEJOIN_185] (rows=348477374 width=88)
+                                      Conds:RS_194._col0=RS_14._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
                                     <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_191]
+                                      SHUFFLE [RS_194]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_190]
+                                         Please refer to the previous Select Operator [SEL_193]
                                     <-Reducer 15 [SIMPLE_EDGE]
                                       SHUFFLE [RS_14]
                                         PartitionCols:_col0
@@ -243,53 +243,53 @@ Stage-0
                                           Output:["_col0","_col1","_col2","_col4","_col5","_col6"]
                                           Filter Operator [FIL_11] (rows=316797606 width=88)
                                             predicate:_col8 is null
-                                            Merge Join Operator [MERGEJOIN_181] (rows=633595212 width=88)
-                                              Conds:RS_207._col1, _col3=RS_209._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
+                                            Merge Join Operator [MERGEJOIN_184] (rows=633595212 width=88)
+                                              Conds:RS_210._col1, _col3=RS_212._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
                                             <-Map 14 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_207]
+                                              SHUFFLE [RS_210]
                                                 PartitionCols:_col1, _col3
-                                                Select Operator [SEL_206] (rows=575995635 width=88)
+                                                Select Operator [SEL_209] (rows=575995635 width=88)
                                                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                  Filter Operator [FIL_205] (rows=575995635 width=88)
+                                                  Filter Operator [FIL_208] (rows=575995635 width=88)
                                                     predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_13_date_dim_d_date_sk_min) AND DynamicValue(RS_13_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_13_date_dim_d_date_sk_bloom_filter))) and ss_sold_date_sk is not null)
                                                     TableScan [TS_3] (rows=575995635 width=88)
                                                       default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_item_sk","ss_customer_sk","ss_ticket_number","ss_quantity","ss_wholesale_cost","ss_sales_price"]
                                                     <-Reducer 7 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_204]
-                                                        Group By Operator [GBY_203] (rows=1 width=12)
+                                                      BROADCAST [RS_207]
+                                                        Group By Operator [GBY_206] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                         <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_200]
-                                                            Group By Operator [GBY_197] (rows=1 width=12)
+                                                          SHUFFLE [RS_203]
+                                                            Group By Operator [GBY_200] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                              Select Operator [SEL_192] (rows=36524 width=1119)
+                                                              Select Operator [SEL_195] (rows=36524 width=1119)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_190]
+                                                                 Please refer to the previous Select Operator [SEL_193]
                                             <-Map 16 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_209]
+                                              SHUFFLE [RS_212]
                                                 PartitionCols:_col0, _col1
-                                                Select Operator [SEL_208] (rows=57591150 width=77)
+                                                Select Operator [SEL_211] (rows=57591150 width=77)
                                                   Output:["_col0","_col1"]
                                                   TableScan [TS_6] (rows=57591150 width=77)
                                                     default@store_returns,store_returns,Tbl:COMPLETE,Col:NONE,Output:["sr_item_sk","sr_ticket_number"]
                         <-Reducer 9 [ONE_TO_ONE_EDGE] vectorized
-                          FORWARD [RS_222]
+                          FORWARD [RS_225]
                             PartitionCols:_col1, _col0
-                            Select Operator [SEL_221] (rows=43560808 width=135)
+                            Select Operator [SEL_224] (rows=43560808 width=135)
                               Output:["_col0","_col1","_col2","_col3","_col4"]
-                              Group By Operator [GBY_220] (rows=43560808 width=135)
+                              Group By Operator [GBY_223] (rows=43560808 width=135)
                                 Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)","sum(VALUE._col2)"],keys:KEY._col0, KEY._col1
                               <-Reducer 8 [SIMPLE_EDGE]
                                 SHUFFLE [RS_39]
                                   PartitionCols:_col0, _col1
                                   Group By Operator [GBY_38] (rows=87121617 width=135)
                                     Output:["_col0","_col1","_col2","_col3","_col4"],aggregations:["sum(_col6)","sum(_col7)","sum(_col8)"],keys:_col4, _col3
-                                    Merge Join Operator [MERGEJOIN_184] (rows=87121617 width=135)
-                                      Conds:RS_193._col0=RS_35._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
+                                    Merge Join Operator [MERGEJOIN_187] (rows=87121617 width=135)
+                                      Conds:RS_196._col0=RS_35._col0(Inner),Output:["_col3","_col4","_col6","_col7","_col8"]
                                     <-Map 1 [SIMPLE_EDGE] vectorized
-                                      SHUFFLE [RS_193]
+                                      SHUFFLE [RS_196]
                                         PartitionCols:_col0
-                                         Please refer to the previous Select Operator [SEL_190]
+                                         Please refer to the previous Select Operator [SEL_193]
                                     <-Reducer 18 [SIMPLE_EDGE]
                                       SHUFFLE [RS_35]
                                         PartitionCols:_col0
@@ -297,32 +297,32 @@ Stage-0
                                           Output:["_col0","_col1","_col2","_col4","_col5","_col6"]
                                           Filter Operator [FIL_32] (rows=79201469 width=135)
                                             predicate:_col8 is null
-                                            Merge Join Operator [MERGEJOIN_183] (rows=158402938 width=135)
-                                              Conds:RS_217._col1, _col3=RS_219._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
+                                            Merge Join Operator [MERGEJOIN_186] (rows=158402938 width=135)
+                                              Conds:RS_220._col1, _col3=RS_222._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col5","_col6","_col8"]
                                             <-Map 17 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_217]
+                                              SHUFFLE [RS_220]
                                                 PartitionCols:_col1, _col3
-                                                Select Operator [SEL_216] (rows=144002668 width=135)
+                                                Select Operator [SEL_219] (rows=144002668 width=135)
                                                   Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"]
-                                                  Filter Operator [FIL_215] (rows=144002668 width=135)
+                                                  Filter Operator [FIL_218] (rows=144002668 width=135)
                                                     predicate:((ws_sold_date_sk BETWEEN DynamicValue(RS_34_date_dim_d_date_sk_min) AND DynamicValue(RS_34_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_34_date_dim_d_date_sk_bloom_filter))) and ws_sold_date_sk is not null)
                                                     TableScan [TS_24] (rows=144002668 width=135)
                                                       default@web_sales,web_sales,Tbl:COMPLETE,Col:NONE,Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk","ws_order_number","ws_quantity","ws_wholesale_cost","ws_sales_price"]
                                                     <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                                      BROADCAST [RS_214]
-                                                        Group By Operator [GBY_213] (rows=1 width=12)
+                                                      BROADCAST [RS_217]
+                                                        Group By Operator [GBY_216] (rows=1 width=12)
                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                         <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_201]
-                                                            Group By Operator [GBY_198] (rows=1 width=12)
+                                                          SHUFFLE [RS_204]
+                                                            Group By Operator [GBY_201] (rows=1 width=12)
                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                              Select Operator [SEL_194] (rows=36524 width=1119)
+                                                              Select Operator [SEL_197] (rows=36524 width=1119)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Select Operator [SEL_190]
+                                                                 Please refer to the previous Select Operator [SEL_193]
                                             <-Map 19 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_219]
+                                              SHUFFLE [RS_222]
                                                 PartitionCols:_col0, _col1
-                                                Select Operator [SEL_218] (rows=14398467 width=92)
+                                                Select Operator [SEL_221] (rows=14398467 width=92)
                                                   Output:["_col0","_col1"]
                                                   TableScan [TS_27] (rows=14398467 width=92)
                                                     default@web_returns,web_returns,Tbl:COMPLETE,Col:NONE,Output:["wr_item_sk","wr_order_number"]


[17/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 3785f89..9dd3787 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -26,10 +26,10 @@ import java.sql.SQLException;
 import java.sql.SQLFeatureNotSupportedException;
 import java.sql.Savepoint;
 import java.sql.Statement;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.BitSet;
-import java.util.Calendar;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -40,7 +40,6 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.SortedSet;
-import java.util.TimeZone;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
@@ -59,11 +58,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.common.ValidReadTxnList;
 import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.common.classification.RetrySemantics;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
-import org.apache.hadoop.hive.metastore.MaterializationsInvalidationCache;
-import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockHandler;
 import org.apache.hadoop.hive.metastore.Warehouse;
 import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
 import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
@@ -869,10 +867,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   @RetrySemantics.Idempotent("No-op if already committed")
   public void commitTxn(CommitTxnRequest rqst)
     throws NoSuchTxnException, TxnAbortedException, MetaException {
-    MaterializationsRebuildLockHandler materializationsRebuildLockHandler =
-        MaterializationsRebuildLockHandler.get();
-    List<TransactionRegistryInfo> txnComponents = new ArrayList<>();
-    boolean isUpdateDelete = false;
+    char isUpdateDelete = 'N';
     long txnid = rqst.getTxnid();
     long sourceTxnId = -1;
 
@@ -936,7 +931,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
                   "tc_operation_type " + conflictSQLSuffix));
         }
         if (rs != null && rs.next()) {
-          isUpdateDelete = true;
+          isUpdateDelete = 'Y';
           close(rs);
           //if here it means currently committing txn performed update/delete and we should check WW conflict
           /**
@@ -1033,8 +1028,8 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           // Move the record from txn_components into completed_txn_components so that the compactor
           // knows where to look to compact.
           s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, " +
-                  "ctc_table, ctc_partition, ctc_writeid) select tc_txnid, tc_database, tc_table, " +
-                  "tc_partition, tc_writeid from TXN_COMPONENTS where tc_txnid = " + txnid;
+                  "ctc_table, ctc_partition, ctc_writeid, ctc_update_delete) select tc_txnid, tc_database, tc_table, " +
+                  "tc_partition, tc_writeid, '" + isUpdateDelete + "' from TXN_COMPONENTS where tc_txnid = " + txnid;
           LOG.debug("Going to execute insert <" + s + ">");
 
           if ((stmt.executeUpdate(s)) < 1) {
@@ -1050,10 +1045,11 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
               rows.add(txnid + "," + quoteString(writeEventInfo.getDatabase()) + "," +
                       quoteString(writeEventInfo.getTable()) + "," +
                       quoteString(writeEventInfo.getPartition()) + "," +
-                      writeEventInfo.getWriteId());
+                      writeEventInfo.getWriteId() + "," +
+                      "'" + isUpdateDelete + "'");
             }
             List<String> queries = sqlGenerator.createInsertValuesStmt("COMPLETED_TXN_COMPONENTS " +
-                    "(ctc_txnid," + " ctc_database, ctc_table, ctc_partition, ctc_writeid)", rows);
+                    "(ctc_txnid," + " ctc_database, ctc_table, ctc_partition, ctc_writeid, ctc_update_delete)", rows);
             for (String q : queries) {
               LOG.debug("Going to execute insert  <" + q + "> ");
               stmt.execute(q);
@@ -1066,18 +1062,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
           stmt.executeUpdate(s);
         }
 
-        // Obtain information that we need to update registry
-        s = "select ctc_database, ctc_table, ctc_writeid, ctc_timestamp from COMPLETED_TXN_COMPONENTS" +
-                " where ctc_txnid = " + txnid;
-
-        LOG.debug("Going to extract table modification information for invalidation cache <" + s + ">");
-        rs = stmt.executeQuery(s);
-        while (rs.next()) {
-          // We only enter in this loop if the transaction actually affected any table
-          txnComponents.add(new TransactionRegistryInfo(rs.getString(1), rs.getString(2),
-              rs.getLong(3), rs.getTimestamp(4, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime()));
-        }
-
         // cleanup all txn related metadata
         s = "delete from TXN_COMPONENTS where tc_txnid = " + txnid;
         LOG.debug("Going to execute update <" + s + ">");
@@ -1092,29 +1076,19 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         LOG.debug("Going to execute update <" + s + ">");
         stmt.executeUpdate(s);
         LOG.info("Removed committed transaction: (" + txnid + ") from MIN_HISTORY_LEVEL");
+
+        s = "delete from MATERIALIZATION_REBUILD_LOCKS where mrl_txn_id = " + txnid;
+        LOG.debug("Going to execute update <" + s + ">");
+        stmt.executeUpdate(s);
+
         if (transactionalListeners != null) {
           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
                   EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator);
         }
 
-        MaterializationsInvalidationCache materializationsInvalidationCache =
-            MaterializationsInvalidationCache.get();
-        for (TransactionRegistryInfo info : txnComponents) {
-          if (materializationsInvalidationCache.containsMaterialization(info.dbName, info.tblName) &&
-              !materializationsRebuildLockHandler.readyToCommitResource(info.dbName, info.tblName, txnid)) {
-            throw new MetaException(
-                "Another process is rebuilding the materialized view " + info.fullyQualifiedName);
-          }
-        }
         LOG.debug("Going to commit");
         close(rs);
         dbConn.commit();
-
-        // Update registry with modifications
-        for (TransactionRegistryInfo info : txnComponents) {
-          materializationsInvalidationCache.notifyTableModification(
-              info.dbName, info.tblName, info.writeId, info.timestamp, isUpdateDelete);
-        }
       } catch (SQLException e) {
         LOG.debug("Going to rollback");
         rollbackDBConn(dbConn);
@@ -1125,9 +1099,6 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
         close(commitIdRs);
         close(lockHandle, stmt, dbConn);
         unlockInternal();
-        for (TransactionRegistryInfo info : txnComponents) {
-          materializationsRebuildLockHandler.unlockResource(info.dbName, info.tblName, txnid);
-        }
       }
     } catch (RetryException e) {
       commitTxn(rqst);
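
With this change, commitTxn no longer updates the in-memory MaterializationsInvalidationCache or coordinates with MaterializationsRebuildLockHandler; instead it persists whether the committing transaction performed an update/delete in the new ctc_update_delete column and clears any rebuild lock row held for that transaction. As a rough sketch only (the transaction id 1234 is made up), the statements issued against the metastore database for an updating transaction now look approximately like:

    insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, ctc_table,
        ctc_partition, ctc_writeid, ctc_update_delete)
      select tc_txnid, tc_database, tc_table, tc_partition, tc_writeid, 'Y'
      from TXN_COMPONENTS where tc_txnid = 1234;

    delete from TXN_COMPONENTS where tc_txnid = 1234;

    delete from MATERIALIZATION_REBUILD_LOCKS where mrl_txn_id = 1234;
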
@@ -1694,16 +1665,30 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   }
 
   /**
-   * Gets the information of the first transaction for the given table
-   * after the transaction with the input id was committed (if any). 
+   * Get invalidation info for the materialization. Currently, the materialization information
+   * only contains information about whether there were update/delete operations on the source
+   * tables used by the materialization since it was created.
    */
   @Override
   @RetrySemantics.ReadOnly
-  public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit(
-      String inputDbName, String inputTableName, ValidWriteIdList txnList)
-          throws MetaException {
-    final List<Long> openTxns = Arrays.asList(ArrayUtils.toObject(txnList.getInvalidWriteIds()));
+  public Materialization getMaterializationInvalidationInfo(
+      CreationMetadata creationMetadata, String validTxnListStr) throws MetaException {
+    if (creationMetadata.getTablesUsed().isEmpty()) {
+      // Bail out
+      LOG.warn("Materialization creation metadata does not contain any table");
+      return null;
+    }
+
+    // Parse validTxnList
+    final ValidReadTxnList validTxnList =
+        new ValidReadTxnList(validTxnListStr);
+
+    // Parse validReaderWriteIdList from creation metadata
+    final ValidTxnWriteIdList validReaderWriteIdList =
+        new ValidTxnWriteIdList(creationMetadata.getValidTxnList());
 
+    // We are composing a query that returns a single row if an update happened after
+    // the materialization was created. Otherwise, query returns 0 rows.
     Connection dbConn = null;
     Statement stmt = null;
     ResultSet rs = null;
@@ -1711,32 +1696,207 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
       stmt = dbConn.createStatement();
       stmt.setMaxRows(1);
-      String s = "select ctc_timestamp, ctc_writeid, ctc_database, ctc_table "
-          + "from COMPLETED_TXN_COMPONENTS "
-          + "where ctc_database=" + quoteString(inputDbName) + " and ctc_table=" + quoteString(inputTableName)
-          + " and ctc_writeid > " + txnList.getHighWatermark()
-          + (txnList.getInvalidWriteIds().length == 0 ?
-              " " : " or ctc_writeid IN(" + StringUtils.join(",", openTxns) + ") ")
-          + "order by ctc_timestamp asc";
+      StringBuilder query = new StringBuilder();
+      // compose a query that select transactions containing an update...
+      query.append("select ctc_update_delete from COMPLETED_TXN_COMPONENTS where ctc_update_delete='Y' AND (");
+      int i = 0;
+      for (String fullyQualifiedName : creationMetadata.getTablesUsed()) {
+        // ...for each of the tables that are part of the materialized view,
+        // where the transaction had to be committed after the materialization was created...
+        if (i != 0) {
+          query.append("OR");
+        }
+        String[] names = TxnUtils.getDbTableName(fullyQualifiedName);
+        query.append(" (ctc_database=" + quoteString(names[0]) + " AND ctc_table=" + quoteString(names[1]));
+        ValidWriteIdList tblValidWriteIdList =
+            validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
+        if (tblValidWriteIdList == null) {
+          LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName);
+          return null;
+        }
+        query.append(" AND (ctc_writeid > " + tblValidWriteIdList.getHighWatermark());
+        query.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " :
+            " OR ctc_writeid IN(" + StringUtils.join(",",
+                Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ");
+        query.append(") ");
+        i++;
+      }
+      // ... and where the transaction had already been committed as per the snapshot taken
+      // for the currently running query
+      query.append(") AND ctc_txnid <= " + validTxnList.getHighWatermark());
+      query.append(validTxnList.getInvalidTransactions().length == 0 ? " " :
+          " AND ctc_txnid NOT IN(" + StringUtils.join(",",
+              Arrays.asList(ArrayUtils.toObject(validTxnList.getInvalidTransactions()))) + ") ");
+
+      // Execute query
+      String s = query.toString();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Going to execute query <" + s + ">");
       }
       rs = stmt.executeQuery(s);
 
-      if(!rs.next()) {
-        return new BasicTxnInfo(true);
+      return new Materialization(rs.next());
+    } catch (SQLException ex) {
+      LOG.warn("getMaterializationInvalidationInfo failed due to " + getMessage(ex), ex);
+      throw new MetaException("Unable to retrieve materialization invalidation information due to " +
+          StringUtils.stringifyException(ex));
+    } finally {
+      close(rs, stmt, dbConn);
+    }
+  }
+
+  @Override
+  public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+      throws MetaException {
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Acquiring lock for materialization rebuild with txnId={} for {}", txnId, Warehouse.getQualifiedName(dbName,tableName));
+    }
+
+    TxnStore.MutexAPI.LockHandle handle = null;
+    Connection dbConn = null;
+    Statement stmt = null;
+    ResultSet rs = null;
+    try {
+      lockInternal();
+      /**
+       * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in
+       * Initiated/Working state for any resource. This ensures we do not run concurrent
+       * rebuild operations on any materialization.
+       */
+      handle = getMutexAPI().acquireLock(MUTEX_KEY.MaterializationRebuild.name());
+      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+      stmt = dbConn.createStatement();
+
+      String selectQ = "select mrl_txn_id from MATERIALIZATION_REBUILD_LOCKS where" +
+          " mrl_db_name =" + quoteString(dbName) +
+          " AND mrl_tbl_name=" + quoteString(tableName);
+      LOG.debug("Going to execute query <" + selectQ + ">");
+      rs = stmt.executeQuery(selectQ);
+      if(rs.next()) {
+        LOG.info("Ignoring request to rebuild " + dbName + "/" + tableName +
+            " since it is already being rebuilt");
+        return new LockResponse(txnId, LockState.NOT_ACQUIRED);
       }
-      final BasicTxnInfo txnInfo = new BasicTxnInfo(false);
-      txnInfo.setTime(rs.getTimestamp(1, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime());
-      txnInfo.setTxnid(rs.getLong(2));
-      txnInfo.setDbname(rs.getString(3));
-      txnInfo.setTablename(rs.getString(4));
-      return txnInfo;
+      String insertQ = "insert into MATERIALIZATION_REBUILD_LOCKS " +
+          "(mrl_txn_id, mrl_db_name, mrl_tbl_name, mrl_last_heartbeat) values (" + txnId +
+          ", '" + dbName + "', '" + tableName + "', " + Instant.now().toEpochMilli() + ")";
+      LOG.debug("Going to execute update <" + insertQ + ">");
+      stmt.executeUpdate(insertQ);
+      LOG.debug("Going to commit");
+      dbConn.commit();
+      return new LockResponse(txnId, LockState.ACQUIRED);
     } catch (SQLException ex) {
-      LOG.warn("getLastCompletedTransactionForTable failed due to " + getMessage(ex), ex);
-      throw new MetaException("Unable to retrieve commits information due to " + StringUtils.stringifyException(ex));
+      LOG.warn("lockMaterializationRebuild failed due to " + getMessage(ex), ex);
+      throw new MetaException("Unable to acquire materialization rebuild lock due to " +
+          StringUtils.stringifyException(ex));
     } finally {
       close(rs, stmt, dbConn);
+      if(handle != null) {
+        handle.releaseLocks();
+      }
+      unlockInternal();
+    }
+  }
+
+  @Override
+  public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+      throws MetaException {
+    try {
+      Connection dbConn = null;
+      Statement stmt = null;
+      ResultSet rs = null;
+      try {
+        lockInternal();
+        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+        stmt = dbConn.createStatement();
+        String s = "update MATERIALIZATION_REBUILD_LOCKS" +
+            " set mrl_last_heartbeat = " + Instant.now().toEpochMilli() +
+            " where mrl_txn_id = " + txnId +
+            " AND mrl_db_name =" + quoteString(dbName) +
+            " AND mrl_tbl_name=" + quoteString(tableName);
+        LOG.debug("Going to execute update <" + s + ">");
+        int rc = stmt.executeUpdate(s);
+        if (rc < 1) {
+          LOG.debug("Going to rollback");
+          dbConn.rollback();
+          LOG.info("No lock found for rebuild of " + Warehouse.getQualifiedName(dbName, tableName) +
+              " when trying to heartbeat");
+          // It could not be renewed, return that information
+          return false;
+        }
+        LOG.debug("Going to commit");
+        dbConn.commit();
+        // It could be renewed, return that information
+        return true;
+      } catch (SQLException e) {
+        LOG.debug("Going to rollback");
+        rollbackDBConn(dbConn);
+        checkRetryable(dbConn, e,
+            "heartbeatLockMaterializationRebuild(" + Warehouse.getQualifiedName(dbName, tableName) + ", " + txnId + ")");
+        throw new MetaException("Unable to heartbeat rebuild lock due to " +
+            StringUtils.stringifyException(e));
+      } finally {
+        close(rs, stmt, dbConn);
+        unlockInternal();
+      }
+    } catch (RetryException e) {
+      return heartbeatLockMaterializationRebuild(dbName, tableName, txnId);
+    }
+  }
+
+  @Override
+  public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException {
+    try {
+      // Aux values
+      long cnt = 0L;
+      List<Long> txnIds = new ArrayList<>();
+      long timeoutTime = Instant.now().toEpochMilli() - timeout;
+
+      Connection dbConn = null;
+      Statement stmt = null;
+      ResultSet rs = null;
+      try {
+        lockInternal();
+        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+        stmt = dbConn.createStatement();
+
+        String selectQ = "select mrl_txn_id, mrl_last_heartbeat from MATERIALIZATION_REBUILD_LOCKS";
+        LOG.debug("Going to execute query <" + selectQ + ">");
+        rs = stmt.executeQuery(selectQ);
+        while(rs.next()) {
+          long lastHeartbeat = rs.getLong(2);
+          if (lastHeartbeat < timeoutTime) {
+            // The heartbeat has timed out; double-check whether we can remove it
+            long txnId = rs.getLong(1);
+            if (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)) {
+              // Txn was committed (but notification was not received) or it was aborted.
+              // In either case, we can clean it up
+              txnIds.add(txnId);
+            }
+          }
+        }
+        if (!txnIds.isEmpty()) {
+          String deleteQ = "delete from MATERIALIZATION_REBUILD_LOCKS where" +
+              " mrl_txn_id IN(" + StringUtils.join(",", txnIds) + ") ";
+          LOG.debug("Going to execute update <" + deleteQ + ">");
+          cnt = stmt.executeUpdate(deleteQ);
+        }
+        LOG.debug("Going to commit");
+        dbConn.commit();
+        return cnt;
+      } catch (SQLException e) {
+        LOG.debug("Going to rollback");
+        rollbackDBConn(dbConn);
+        checkRetryable(dbConn, e, "cleanupMaterializationRebuildLocks");
+        throw new MetaException("Unable to clean rebuild locks due to " +
+            StringUtils.stringifyException(e));
+      } finally {
+        close(rs, stmt, dbConn);
+        unlockInternal();
+      }
+    } catch (RetryException e) {
+      return cleanupMaterializationRebuildLocks(validTxnList, timeout);
     }
   }
 
@@ -2009,6 +2169,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
   private static String normalizeCase(String s) {
     return s == null ? null : s.toLowerCase();
   }
+
   private LockResponse checkLockWithRetry(Connection dbConn, long extLockId, long txnId)
     throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException {
     try {
@@ -4887,20 +5048,4 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
     }
   };
 
-  private class TransactionRegistryInfo {
-    final String dbName;
-    final String tblName;
-    final String fullyQualifiedName;
-    final long writeId;
-    final long timestamp;
-
-    public TransactionRegistryInfo (String dbName, String tblName, long writeId, long timestamp) {
-      this.dbName = dbName;
-      this.tblName = tblName;
-      this.fullyQualifiedName = Warehouse.getQualifiedName(dbName, tblName);
-      this.writeId = writeId;
-      this.timestamp = timestamp;
-    }
-  }
-
 }
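
A minimal sketch of how the three rebuild-lock entry points added to TxnHandler above are meant
to be driven. This is a hypothetical caller written for illustration only: the class, method and
variable names below are invented, and only lockMaterializationRebuild,
heartbeatLockMaterializationRebuild and the cleanup behaviour come from the patch itself.

import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class RebuildLockSketch {
  // Returns true if this process was allowed to (and did) rebuild the given materialized view.
  static boolean tryRebuild(TxnStore txnStore, String dbName, String tableName, long txnId)
      throws MetaException {
    LockResponse lock = txnStore.lockMaterializationRebuild(dbName, tableName, txnId);
    if (lock.getState() != LockState.ACQUIRED) {
      return false; // another process already holds the rebuild lock for this view
    }
    // ... run the rebuild here; while it runs, renew the lock periodically so the
    // cleaner does not reclaim it as abandoned:
    boolean stillHeld = txnStore.heartbeatLockMaterializationRebuild(dbName, tableName, txnId);
    if (!stillHeld) {
      return false; // lock row disappeared (e.g. reclaimed after missed heartbeats), abort
    }
    // On commit, commitTxn() now deletes the MATERIALIZATION_REBUILD_LOCKS row for txnId;
    // abandoned rows are reclaimed by cleanupMaterializationRebuildLocks(validTxnList, timeout).
    return true;
  }
}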

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index d972d10..33f24fb 100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidWriteIdList;
 import org.apache.hadoop.hive.common.classification.RetrySemantics;
 import org.apache.hadoop.hive.metastore.api.*;
@@ -29,6 +30,7 @@ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
 import java.sql.SQLException;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 /**
@@ -41,7 +43,7 @@ public interface TxnStore extends Configurable {
 
   enum MUTEX_KEY {
     Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock,
-    WriteSetCleaner, CompactionScheduler, WriteIdAllocator
+    WriteSetCleaner, CompactionScheduler, WriteIdAllocator, MaterializationRebuild
   }
   // Compactor states (Should really be enum)
   String INITIATED_RESPONSE = "initiated";
@@ -128,21 +130,33 @@ public interface TxnStore extends Configurable {
   void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException;
 
   /**
-   * Get the first transaction corresponding to given database and table after transactions
-   * referenced in the transaction snapshot.
-   * @return
+   * Get invalidation info for the materialization. Currently, the materialization information
+   * only contains information about whether there were update/delete operations on the source
+   * tables used by the materialization since it was created.
+   * @param cm creation metadata for the materialization
+   * @param validTxnList valid transaction list for snapshot taken for current query
    * @throws MetaException
    */
   @RetrySemantics.Idempotent
-  BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit(
-      String inputDbName, String inputTableName, ValidWriteIdList txnList)
+  Materialization getMaterializationInvalidationInfo(
+      final CreationMetadata cm, final String validTxnList)
           throws MetaException;
-  /**
-   * Gets the list of valid write ids for the given table wrt to current txn
-   * @param rqst info on transaction and list of table names associated with given transaction
-   * @throws NoSuchTxnException
-   * @throws MetaException
-   */
+
+  LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+      throws MetaException;
+
+  boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+      throws MetaException;
+
+  long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout)
+      throws MetaException;
+
+  /**
+   * Gets the list of valid write ids for the given table wrt the current txn.
+   * @param rqst info on transaction and list of table names associated with given transaction
+   * @throws NoSuchTxnException
+   * @throws MetaException
+   */
   @RetrySemantics.ReadOnly
   GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
           throws NoSuchTxnException,  MetaException;
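
A rough usage sketch of the reshaped invalidation API declared above. The caller shown is
hypothetical; cm and validTxnListStr stand for the view's creation metadata and the current
query's transaction snapshot, and the isSourceTablesUpdateDeleteModified() getter name is an
assumption based on standard Thrift codegen for the required bool field.

import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Materialization;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;

public class InvalidationCheckSketch {
  // Returns true when the materialized view can still be considered up to date.
  static boolean isViewFresh(TxnStore txnStore, CreationMetadata cm, String validTxnListStr)
      throws MetaException {
    Materialization m = txnStore.getMaterializationInvalidationInfo(cm, validTxnListStr);
    if (m == null) {
      return false; // creation metadata was unusable (e.g. no source tables recorded)
    }
    // The flag is true when some source table had an update/delete committed after the
    // snapshot the view was built from; such a view should not be used for rewriting.
    return !m.isSourceTablesUpdateDeleteModified();
  }
}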

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-3.1.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-3.1.0.derby.sql b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-3.1.0.derby.sql
index a696d06..5e8693e 100644
--- a/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-3.1.0.derby.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-3.1.0.derby.sql
@@ -197,7 +197,8 @@ CREATE TABLE "APP"."MV_CREATION_METADATA" (
   "CAT_NAME" VARCHAR(256) NOT NULL,
   "DB_NAME" VARCHAR(128) NOT NULL,
   "TBL_NAME" VARCHAR(256) NOT NULL,
-  "TXN_LIST" CLOB
+  "TXN_LIST" CLOB,
+  "MATERIALIZATION_TIME" BIGINT NOT NULL
 );
 
 CREATE TABLE "APP"."MV_TABLES_USED" (
@@ -526,7 +527,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -645,6 +647,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID BIGINT NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "APP"."I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" integer not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 7cab4fb..5ba71c4 100644
--- a/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -197,7 +197,8 @@ CREATE TABLE "APP"."MV_CREATION_METADATA" (
   "CAT_NAME" VARCHAR(256) NOT NULL,
   "DB_NAME" VARCHAR(128) NOT NULL,
   "TBL_NAME" VARCHAR(256) NOT NULL,
-  "TXN_LIST" CLOB
+  "TXN_LIST" CLOB,
+  "MATERIALIZATION_TIME" BIGINT NOT NULL
 );
 
 CREATE TABLE "APP"."MV_TABLES_USED" (
@@ -526,7 +527,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -645,6 +647,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID BIGINT NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "APP"."I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" integer not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql b/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql
index 7058ab0..2b200f2 100644
--- a/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql
@@ -45,5 +45,24 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
 );
 INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
 
+-- HIVE-19027
+-- add column MATERIALIZATION_TIME (bigint) to MV_CREATION_METADATA table
+ALTER TABLE "APP"."MV_CREATION_METADATA" ADD COLUMN "MATERIALIZATION_TIME" BIGINT;
+UPDATE "APP"."MV_CREATION_METADATA" SET "MATERIALIZATION_TIME" = 0;
+ALTER TABLE "APP"."MV_CREATION_METADATA" ALTER COLUMN "MATERIALIZATION_TIME" NOT NULL;
+
+-- add column CTC_UPDATE_DELETE (char) to COMPLETED_TXN_COMPONENTS table
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_UPDATE_DELETE char(1);
+UPDATE COMPLETED_TXN_COMPONENTS SET CTC_UPDATE_DELETE = 'N';
+ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_UPDATE_DELETE NOT NULL;
+
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID BIGINT NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 -- This needs to be the last thing done.  Insert any changes above this line.
 UPDATE "APP".VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1;
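
One non-obvious detail of the new MATERIALIZATION_REBUILD_LOCKS table created above:
MRL_LAST_HEARTBEAT is a plain BIGINT holding an epoch-millisecond value (TxnHandler writes
Instant.now().toEpochMilli() into it), so the cleaner's staleness test is simple arithmetic.
A small illustrative sketch with invented names, not actual Hive code:

public class RebuildLockStalenessSketch {
  // Staleness test as applied by cleanupMaterializationRebuildLocks: timeoutMs is the
  // configured rebuild-lock timeout in milliseconds, lastHeartbeat the MRL_LAST_HEARTBEAT
  // column value. In the patch, a stale row is only deleted when its owning transaction is
  // no longer open (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)).
  static boolean isStale(long lastHeartbeat, long timeoutMs) {
    long timeoutTime = System.currentTimeMillis() - timeoutMs;
    return lastHeartbeat < timeoutTime;
  }
}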

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
index d7722dc..446ee6e 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-3.1.0.mssql.sql
@@ -388,7 +388,8 @@ CREATE TABLE MV_CREATION_METADATA
     CAT_NAME nvarchar(256) NOT NULL,
     DB_NAME nvarchar(128) NOT NULL,
     TBL_NAME nvarchar(256) NOT NULL,
-    TXN_LIST text NULL
+    TXN_LIST text NULL,
+    MATERIALIZATION_TIME bigint NOT NULL
 );
 
 ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
@@ -1034,7 +1035,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS(
 	CTC_TABLE nvarchar(128) NULL,
 	CTC_PARTITION nvarchar(767) NULL,
     CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL,
-    CTC_WRITEID bigint
+    CTC_WRITEID bigint,
+    CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1191,6 +1193,17 @@ PRIMARY KEY CLUSTERED
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME nvarchar(128) NOT NULL,
+  MRL_TBL_NAME nvarchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+PRIMARY KEY CLUSTERED
+(
+    MRL_TXN_ID ASC
+)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" int not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index a81fc40..bbc8ea2 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@ -248,7 +248,6 @@ CREATE TABLE TAB_COL_STATS
 ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
 CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
 
-
 -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
 CREATE TABLE TYPES
 (
@@ -389,7 +388,8 @@ CREATE TABLE MV_CREATION_METADATA
     CAT_NAME nvarchar(256) NOT NULL,
     DB_NAME nvarchar(128) NOT NULL,
     TBL_NAME nvarchar(256) NOT NULL,
-    TXN_LIST text NULL
+    TXN_LIST text NULL,
+    MATERIALIZATION_TIME bigint NOT NULL
 );
 
 ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
@@ -1035,7 +1035,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS(
 	CTC_TABLE nvarchar(128) NULL,
 	CTC_PARTITION nvarchar(767) NULL,
     CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL,
-    CTC_WRITEID bigint
+    CTC_WRITEID bigint,
+    CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1192,6 +1193,17 @@ PRIMARY KEY CLUSTERED
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME nvarchar(128) NOT NULL,
+  MRL_TBL_NAME nvarchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+PRIMARY KEY CLUSTERED
+(
+    MRL_TXN_ID ASC
+)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" int not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql b/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
index 41f23f7..d44cfdb 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql
@@ -46,6 +46,25 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
 ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION);
 INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
 
+-- HIVE-19027
+-- add column MATERIALIZATION_TIME (bigint) to MV_CREATION_METADATA table
+ALTER TABLE MV_CREATION_METADATA ADD MATERIALIZATION_TIME bigint NOT NULL DEFAULT(0);
+
+-- add column CTC_UPDATE_DELETE (char) to COMPLETED_TXN_COMPONENTS table
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_UPDATE_DELETE char(1) NOT NULL DEFAULT('N');
+
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME nvarchar(128) NOT NULL,
+  MRL_TBL_NAME nvarchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+PRIMARY KEY CLUSTERED
+(
+    MRL_TXN_ID ASC
+)
+);
+
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS MESSAGE;

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
index 29d4a43..75612a7 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-3.1.0.mysql.sql
@@ -603,6 +603,7 @@ CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
   `TXN_LIST` TEXT DEFAULT NULL,
+  `MATERIALIZATION_TIME` bigint(20) NOT NULL,
   PRIMARY KEY (`MV_CREATION_METADATA_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
@@ -1006,7 +1007,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE;
@@ -1124,6 +1126,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
 CREATE TABLE `I_SCHEMA` (
   `SCHEMA_ID` BIGINT PRIMARY KEY,
   `SCHEMA_TYPE` INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 968f4a4..d53e7fc 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -603,6 +603,7 @@ CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
   `TXN_LIST` TEXT DEFAULT NULL,
+  `MATERIALIZATION_TIME` bigint(20) NOT NULL,
   PRIMARY KEY (`MV_CREATION_METADATA_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 /*!40101 SET character_set_client = @saved_cs_client */;
@@ -1006,7 +1007,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE;
@@ -1124,6 +1126,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
 CREATE TABLE `I_SCHEMA` (
   `SCHEMA_ID` BIGINT PRIMARY KEY,
   `SCHEMA_TYPE` INTEGER NOT NULL,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql b/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql
index e103bef..7752e89 100644
--- a/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql
@@ -46,6 +46,26 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
 
+-- HIVE-19027
+-- add column MATERIALIZATION_TIME (bigint) to MV_CREATION_METADATA table
+ALTER TABLE `MV_CREATION_METADATA` ADD `MATERIALIZATION_TIME` BIGINT;
+UPDATE `MV_CREATION_METADATA` SET `MATERIALIZATION_TIME` = 0;
+ALTER TABLE `MV_CREATION_METADATA` MODIFY COLUMN `MATERIALIZATION_TIME` BIGINT NOT NULL;
+
+-- add column CTC_UPDATE_DELETE (char) to COMPLETED_TXN_COMPONENTS table
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_UPDATE_DELETE char(1);
+UPDATE COMPLETED_TXN_COMPONENTS SET CTC_UPDATE_DELETE = 'N';
+ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY COLUMN CTC_UPDATE_DELETE char(1) NOT NULL;
+
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID BIGINT NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS ' ';

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
index 9adea31..a4720c8 100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-3.1.0.oracle.sql
@@ -410,7 +410,8 @@ CREATE TABLE MV_CREATION_METADATA
     CAT_NAME VARCHAR2(256) NOT NULL,
     DB_NAME VARCHAR2(128) NOT NULL,
     TBL_NAME VARCHAR2(256) NOT NULL,
-    TXN_LIST CLOB NULL
+    TXN_LIST CLOB NULL,
+    MATERIALIZATION_TIME NUMBER NOT NULL
 );
 
 ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
@@ -983,7 +984,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE VARCHAR2(256),
   CTC_PARTITION VARCHAR2(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID NUMBER(19)
+  CTC_WRITEID NUMBER(19),
+  CTC_UPDATE_DELETE CHAR(1) NOT NULL
 ) ROWDEPENDENCIES;
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1100,6 +1102,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID NUMBER NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT NUMBER NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" number primary key,
   "SCHEMA_TYPE" number not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index faca669..e58ee33 100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@ -410,7 +410,8 @@ CREATE TABLE MV_CREATION_METADATA
     CAT_NAME VARCHAR2(256) NOT NULL,
     DB_NAME VARCHAR2(128) NOT NULL,
     TBL_NAME VARCHAR2(256) NOT NULL,
-    TXN_LIST CLOB NULL
+    TXN_LIST CLOB NULL,
+    MATERIALIZATION_TIME NUMBER NOT NULL
 );
 
 ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
@@ -983,7 +984,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE VARCHAR2(256),
   CTC_PARTITION VARCHAR2(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID NUMBER(19)
+  CTC_WRITEID NUMBER(19),
+  CTC_UPDATE_DELETE CHAR(1) NOT NULL
 ) ROWDEPENDENCIES;
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1100,6 +1102,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID NUMBER NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT NUMBER NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" number primary key,
   "SCHEMA_TYPE" number not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql b/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql
index cf8699b..e4efe4d 100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.0.0-to-3.1.0.oracle.sql
@@ -46,6 +46,25 @@ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
 );
 INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
 
+-- HIVE-19027
+-- add column MATERIALIZATION_TIME (bigint) to MV_CREATION_METADATA table
+ALTER TABLE MV_CREATION_METADATA ADD MATERIALIZATION_TIME NUMBER NULL;
+UPDATE MV_CREATION_METADATA SET MATERIALIZATION_TIME = 0;
+ALTER TABLE MV_CREATION_METADATA MODIFY(MATERIALIZATION_TIME NOT NULL);
+
+-- add column CTC_UPDATE_DELETE (char) to COMPLETED_TXN_COMPONENTS table
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_UPDATE_DELETE char(1) NULL;
+UPDATE COMPLETED_TXN_COMPONENTS SET CTC_UPDATE_DELETE = 'N';
+ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_UPDATE_DELETE NOT NULL);
+
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID NUMBER NOT NULL,
+  MRL_DB_NAME VARCHAR(128) NOT NULL,
+  MRL_TBL_NAME VARCHAR(256) NOT NULL,
+  MRL_LAST_HEARTBEAT NUMBER NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE VERSION SET SCHEMA_VERSION='3.1.0', VERSION_COMMENT='Hive release version 3.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0' AS Status from dual;

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
index 7a8a419..a74c388 100644
--- a/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-3.1.0.postgres.sql
@@ -404,7 +404,8 @@ CREATE TABLE "MV_CREATION_METADATA" (
     "CAT_NAME" character varying(256) NOT NULL,
     "DB_NAME" character varying(128) NOT NULL,
     "TBL_NAME" character varying(256) NOT NULL,
-    "TXN_LIST" text
+    "TXN_LIST" text,
+    "MATERIALIZATION_TIME" bigint NOT NULL
 );
 
 --
@@ -1673,7 +1674,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1790,6 +1792,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME varchar(128) NOT NULL,
+  MRL_TBL_NAME varchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" integer not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 2e7ac5a..5d1a525 100644
--- a/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -404,7 +404,8 @@ CREATE TABLE "MV_CREATION_METADATA" (
     "CAT_NAME" character varying(256) NOT NULL,
     "DB_NAME" character varying(128) NOT NULL,
     "TBL_NAME" character varying(256) NOT NULL,
-    "TXN_LIST" text
+    "TXN_LIST" text,
+    "MATERIALIZATION_TIME" bigint NOT NULL
 );
 
 --
@@ -1282,6 +1283,11 @@ CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
 
 CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
 
+--
+-- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+--
+
+CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME");
 
 --
 -- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
@@ -1303,13 +1309,6 @@ CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
 CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
 
 --
--- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
---
-
-CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME");
-
-
---
 -- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
 --
 
@@ -1675,7 +1674,8 @@ CREATE TABLE COMPLETED_TXN_COMPONENTS (
   CTC_TABLE varchar(256),
   CTC_PARTITION varchar(767),
   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
-  CTC_WRITEID bigint
+  CTC_WRITEID bigint,
+  CTC_UPDATE_DELETE char(1) NOT NULL
 );
 
 CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
@@ -1792,6 +1792,14 @@ CREATE TABLE MIN_HISTORY_LEVEL (
 
 CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
 
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME varchar(128) NOT NULL,
+  MRL_TBL_NAME varchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 CREATE TABLE "I_SCHEMA" (
   "SCHEMA_ID" bigint primary key,
   "SCHEMA_TYPE" integer not null,

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql b/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql
index 445c3a2..dadf065 100644
--- a/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.0.0-to-3.1.0.postgres.sql
@@ -48,6 +48,25 @@ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" (
 );
 INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
 
+-- HIVE-19027
+-- add column MATERIALIZATION_TIME (bigint) to MV_CREATION_METADATA table
+ALTER TABLE "MV_CREATION_METADATA" ADD COLUMN "MATERIALIZATION_TIME" bigint NULL;
+UPDATE "MV_CREATION_METADATA" SET "MATERIALIZATION_TIME" = 0;
+ALTER TABLE "MV_CREATION_METADATA" ALTER COLUMN "MATERIALIZATION_TIME" SET NOT NULL;
+
+-- add column CTC_UPDATE_DELETE (char) to COMPLETED_TXN_COMPONENTS table
+ALTER TABLE COMPLETED_TXN_COMPONENTS ADD COLUMN CTC_UPDATE_DELETE char(1) NULL;
+UPDATE COMPLETED_TXN_COMPONENTS SET CTC_UPDATE_DELETE = 'N';
+ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_UPDATE_DELETE SET NOT NULL;
+
+CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+  MRL_TXN_ID bigint NOT NULL,
+  MRL_DB_NAME varchar(128) NOT NULL,
+  MRL_TBL_NAME varchar(256) NOT NULL,
+  MRL_LAST_HEARTBEAT bigint NOT NULL,
+  PRIMARY KEY(MRL_TXN_ID)
+);
+
 -- These lines need to be last.  Insert any changes above.
 UPDATE "VERSION" SET "SCHEMA_VERSION"='3.1.0', "VERSION_COMMENT"='Hive release version 3.1.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 3.0.0 to 3.1.0';

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 1ca6454..8965059 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -1073,6 +1073,7 @@ struct CreationMetadata {
     3: required string tblName,
     4: required set<string> tablesUsed,
     5: optional string validTxnList,
+    6: optional i64 materializationTime
 }
 
 struct NotificationEventRequest {
@@ -1273,10 +1274,7 @@ struct TableMeta {
 }
 
 struct Materialization {
-  1: required set<string> tablesUsed;
-  2: optional string validTxnList
-  3: optional i64 invalidationTime;
-  4: optional bool sourceTablesUpdateDeleteModified;
+  1: required bool sourceTablesUpdateDeleteModified;
 }
 
 // Data types for workload management.
@@ -1728,7 +1726,7 @@ service ThriftHiveMetastore extends fb303.FacebookService
   GetTableResult get_table_req(1:GetTableRequest req) throws (1:MetaException o1, 2:NoSuchObjectException o2)
   GetTablesResult get_table_objects_by_name_req(1:GetTablesRequest req)
 				   throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
-  map<string, Materialization> get_materialization_invalidation_info(1:string dbname, 2:list<string> tbl_names)
+  Materialization get_materialization_invalidation_info(1:CreationMetadata creation_metadata, 2:string validTxnList)
 				   throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
   void update_creation_metadata(1: string catName, 2:string dbname, 3:string tbl_name, 4:CreationMetadata creation_metadata)
                    throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
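
To make the Thrift signature change above concrete: get_materialization_invalidation_info is now
invoked once per materialization with its CreationMetadata and the current snapshot, instead of
once per database with a list of view names, and Materialization carries only the update/delete
flag. A hedged client-side sketch follows; the catalog, database, view and table names are
invented, and the generated setter names are assumed from standard Thrift codegen.

import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Materialization;

public class ThriftCallSketch {
  static Materialization check(IMetaStoreClient client, String snapshotAtCreation,
      String currentSnapshot) throws Exception {
    // cm mirrors what is stored for the view at creation/rebuild time; names are invented.
    CreationMetadata cm = new CreationMetadata("hive", "db1", "mv1",
        ImmutableSet.of("db1.t1", "db1.t2"));
    cm.setValidTxnList(snapshotAtCreation);                 // snapshot recorded when the view was built
    cm.setMaterializationTime(System.currentTimeMillis());  // new optional field added in this patch
    // One call per materialization, replacing the old (dbName, list<viewNames>) -> map form.
    return client.getMaterializationInvalidationInfo(cm, currentSnapshot);
  }
}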

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 53c4d24..d91f737 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -164,8 +164,6 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
       // instantiate the metastore server handler directly instead of connecting
       // through the network
       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
-      // Initialize materializations invalidation cache (only for local metastore)
-      MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client);
       isConnected = true;
       snapshotActiveConf();
       return;
@@ -1442,10 +1440,9 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   /** {@inheritDoc} */
   @Override
-  public Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+  public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
       throws MetaException, InvalidOperationException, UnknownDBException, TException {
-    return client.get_materialization_invalidation_info(
-        dbName, filterHook.filterTableNames(null, dbName, viewNames));
+    return client.get_materialization_invalidation_info(cm, validTxnList);
   }
 
   /** {@inheritDoc} */

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java
deleted file mode 100644
index 8debcce..0000000
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.BasicTxnInfo;
-import org.apache.hadoop.hive.metastore.api.CreationMetadata;
-import org.apache.hadoop.hive.metastore.api.Materialization;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.junit.Assert;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
-
-import java.util.Map;
-
-import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-/**
- * Unit tests for {@link org.apache.hadoop.hive.metastore.MaterializationsInvalidationCache}.
- * The tests focus on arrival of notifications (possibly out of order) and the logic
- * to clean up the materializations cache. Tests need to be executed in a certain order
- * to avoid interactions among them, as the invalidation cache is a singleton.
- */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
-public class TestMetaStoreMaterializationsCacheCleaner {
-
-  private static final String DB_NAME = "hive3252";
-  private static final String TBL_NAME_1 = "tmptbl1";
-  private static final String TBL_NAME_2 = "tmptbl2";
-  private static final String TBL_NAME_3 = "tmptbl3";
-  private static final String MV_NAME_1 = "mv1";
-  private static final String MV_NAME_2 = "mv2";
-
-
-  @Test
-  public void testCleanerScenario1() throws Exception {
-    // create mock raw store
-    Configuration conf = new Configuration();
-    conf.set("metastore.materializations.invalidation.impl", "DISABLE");
-    // create mock handler
-    final IHMSHandler handler = mock(IHMSHandler.class);
-    // initialize invalidation cache (set conf to disable)
-    MaterializationsInvalidationCache.get().init(conf, handler);
-
-    // This is a dummy test, invalidation cache is not supposed to
-    // record any information.
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 1, 1, false);
-    int id = 2;
-    BasicTxnInfo txn2 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, id, id, false);
-    // Create tbl2 (nothing to do)
-    id = 3;
-    BasicTxnInfo txn3 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, id, id, false);
-    // Cleanup (current = 4, duration = 4) -> Does nothing
-    long removed = MaterializationsInvalidationCache.get().cleanup(0L);
-    Assert.assertEquals(0L, removed);
-    // Create mv1
-    Table mv1 = mock(Table.class);
-    when(mv1.getDbName()).thenReturn(DB_NAME);
-    when(mv1.getTableName()).thenReturn(MV_NAME_1);
-    CreationMetadata mockCM1 = new CreationMetadata(
-        DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1,
-        ImmutableSet.of(
-            DB_NAME + "." + TBL_NAME_1,
-            DB_NAME + "." + TBL_NAME_2));
-    // Create txn list (highWatermark=4;minOpenTxn=Long.MAX_VALUE)
-    mockCM1.setValidTxnList("3:" + Long.MAX_VALUE + "::");
-    when(mv1.getCreationMetadata()).thenReturn(mockCM1);
-    MaterializationsInvalidationCache.get().createMaterializedView(mockCM1.getDbName(), mockCM1.getTblName(),
-        mockCM1.getTablesUsed(), mockCM1.getValidTxnList());
-    // Format <txnId>$<table_name>:<hwm>:<minOpenWriteId>:<open_writeids>:<abort_writeids>$<table_name>
-    Map<String, Materialization> invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    id = 10;
-    BasicTxnInfo txn10 = createTxnInfo(DB_NAME, TBL_NAME_2, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, id, id, false);
-    id = 9;
-    BasicTxnInfo txn9 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, id, id, false);
-    // Cleanup (current = 12, duration = 4) -> Removes txn1, txn2, txn3
-    removed = MaterializationsInvalidationCache.get().cleanup(8L);
-    Assert.assertEquals(0L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    // Create mv2
-    Table mv2 = mock(Table.class);
-    when(mv2.getDbName()).thenReturn(DB_NAME);
-    when(mv2.getTableName()).thenReturn(MV_NAME_2);
-    CreationMetadata mockCM2 = new CreationMetadata(
-        DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2,
-        ImmutableSet.of(
-            DB_NAME + "." + TBL_NAME_1,
-            DB_NAME + "." + TBL_NAME_2));
-    // Create txn list (highWatermark=10;minOpenTxn=Long.MAX_VALUE)
-    mockCM2.setValidTxnList("10:" + Long.MAX_VALUE + "::");
-    when(mv2.getCreationMetadata()).thenReturn(mockCM2);
-    MaterializationsInvalidationCache.get().createMaterializedView(mockCM2.getDbName(), mockCM2.getTblName(),
-        mockCM2.getTablesUsed(), mockCM2.getValidTxnList());
-    when(mv2.getCreationMetadata()).thenReturn(mockCM2);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    // Create tbl3 (nothing to do)
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_3, 11, 11, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_3, 18, 18, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 14, 14, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 17, 17, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 16, 16, false);
-    // Cleanup (current = 20, duration = 4) -> Removes txn10, txn11
-    removed = MaterializationsInvalidationCache.get().cleanup(16L);
-    Assert.assertEquals(0L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 12, 12, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 15, 15, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 7, 7, false);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    // Cleanup (current = 24, duration = 4) -> Removes txn9, txn14, txn15, txn16, txn17, txn18
-    removed = MaterializationsInvalidationCache.get().cleanup(20L);
-    Assert.assertEquals(0L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertTrue(invalidationInfos.isEmpty());
-    // Cleanup (current = 28, duration = 4) -> Removes txn9
-    removed = MaterializationsInvalidationCache.get().cleanup(24L);
-    Assert.assertEquals(0L, removed);
-  }
-
-  @Test
-  public void testCleanerScenario2() throws Exception {
-    // create mock raw store
-    Configuration conf = new Configuration();
-    conf.set("metastore.materializations.invalidation.impl", "DEFAULT");
-    // create mock handler
-    final IHMSHandler handler = mock(IHMSHandler.class);
-    // initialize invalidation cache (set conf to default)
-    MaterializationsInvalidationCache.get().init(conf, handler);
-
-    // Scenario consists of the following steps:
-    // Create tbl1
-    // (t = 1) Insert row in tbl1
-    // (t = 2) Insert row in tbl1
-    // Create tbl2
-    // (t = 3) Insert row in tbl2
-    // Cleanup (current = 4, duration = 4) -> Does nothing
-    // Create mv1
-    // (t = 10) Insert row in tbl2
-    // (t = 9) Insert row in tbl1 (out of order)
-    // Cleanup (current = 12, duration = 4) -> Removes txn1, txn2, txn3
-    // Create mv2
-    // Create tbl3
-    // (t = 11) Insert row in tbl3
-    // (t = 18) Insert row in tbl3
-    // (t = 14) Insert row in tbl1
-    // (t = 17) Insert row in tbl1
-    // (t = 16) Insert row in tbl2
-    // Cleanup (current = 20, duration = 4) -> Removes txn10, txn11
-    // (t = 12) Insert row in tbl1
-    // (t = 15) Insert row in tbl2
-    // (t = 7) Insert row in tbl2
-    // Cleanup (current = 24, duration = 4) -> Removes txn9, txn14, txn15, txn16, txn17, txn18
-    // Create tbl1 (nothing to do)
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 1, 1, false);
-    int id = 2;
-    BasicTxnInfo txn2 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, id, id, false);
-    // Create tbl2 (nothing to do)
-    id = 3;
-    BasicTxnInfo txn3 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, id, id, false);
-    // Cleanup (current = 4, duration = 4) -> Does nothing
-    long removed = MaterializationsInvalidationCache.get().cleanup(0L);
-    Assert.assertEquals(0L, removed);
-    // Create mv1
-    Table mv1 = mock(Table.class);
-    when(mv1.getDbName()).thenReturn(DB_NAME);
-    when(mv1.getTableName()).thenReturn(MV_NAME_1);
-    CreationMetadata mockCM1 = new CreationMetadata(
-        DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1,
-        ImmutableSet.of(
-            DB_NAME + "." + TBL_NAME_1,
-            DB_NAME + "." + TBL_NAME_2));
-    // Create txn list (highWatermark=4;minOpenTxn=Long.MAX_VALUE)
-    mockCM1.setValidTxnList("3$" + DB_NAME + "." + TBL_NAME_1 + ":3:" + Long.MAX_VALUE + "::" +
-        "$" + DB_NAME + "." + TBL_NAME_2 + ":3:" + Long.MAX_VALUE + "::");
-    when(mv1.getCreationMetadata()).thenReturn(mockCM1);
-    MaterializationsInvalidationCache.get().createMaterializedView(mockCM1.getDbName(), mockCM1.getTblName(),
-        mockCM1.getTablesUsed(), mockCM1.getValidTxnList());
-    Map<String, Materialization> invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1));
-    Assert.assertEquals(0L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    id = 10;
-    BasicTxnInfo txn10 = createTxnInfo(DB_NAME, TBL_NAME_2, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, id, id, false);
-    id = 9;
-    BasicTxnInfo txn9 = createTxnInfo(DB_NAME, TBL_NAME_1, id);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, id, id, false);
-    // Cleanup (current = 12, duration = 4) -> Removes txn1, txn2, txn3
-    removed = MaterializationsInvalidationCache.get().cleanup(8L);
-    Assert.assertEquals(3L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1));
-    Assert.assertEquals(9L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    // Create mv2
-    Table mv2 = mock(Table.class);
-    when(mv2.getDbName()).thenReturn(DB_NAME);
-    when(mv2.getTableName()).thenReturn(MV_NAME_2);
-    CreationMetadata mockCM2 = new CreationMetadata(
-        DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2,
-        ImmutableSet.of(
-            DB_NAME + "." + TBL_NAME_1,
-            DB_NAME + "." + TBL_NAME_2));
-    // Create txn list (highWatermark=10;minOpenTxn=Long.MAX_VALUE)
-    mockCM2.setValidTxnList("10$" + DB_NAME + "." + TBL_NAME_1 + ":10:" + Long.MAX_VALUE + "::" +
-        "$" + DB_NAME + "." + TBL_NAME_2 + ":10:" + Long.MAX_VALUE + "::");
-    when(mv2.getCreationMetadata()).thenReturn(mockCM2);
-    MaterializationsInvalidationCache.get().createMaterializedView(mockCM2.getDbName(), mockCM2.getTblName(),
-        mockCM2.getTablesUsed(), mockCM2.getValidTxnList());
-    when(mv2.getCreationMetadata()).thenReturn(mockCM2);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertEquals(9L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    Assert.assertEquals(0L, invalidationInfos.get(MV_NAME_2).getInvalidationTime());
-    // Create tbl3 (nothing to do)
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_3, 11, 11, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_3, 18, 18, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 14, 14, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 17, 17, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 16, 16, false);
-    // Cleanup (current = 20, duration = 4) -> Removes txn10, txn11
-    removed = MaterializationsInvalidationCache.get().cleanup(16L);
-    Assert.assertEquals(2L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertEquals(9L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    Assert.assertEquals(14L, invalidationInfos.get(MV_NAME_2).getInvalidationTime());
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_1, 12, 12, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 15, 15, false);
-    MaterializationsInvalidationCache.get().notifyTableModification(
-        DB_NAME, TBL_NAME_2, 7, 7, false);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertEquals(7L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    Assert.assertEquals(12L, invalidationInfos.get(MV_NAME_2).getInvalidationTime());
-    // Cleanup (current = 24, duration = 4) -> Removes txn9, txn14, txn15, txn16, txn17, txn18
-    removed = MaterializationsInvalidationCache.get().cleanup(20L);
-    Assert.assertEquals(6L, removed);
-    invalidationInfos =
-        MaterializationsInvalidationCache.get().getMaterializationInvalidationInfo(
-            DB_NAME, ImmutableList.of(MV_NAME_1, MV_NAME_2));
-    Assert.assertEquals(7L, invalidationInfos.get(MV_NAME_1).getInvalidationTime());
-    Assert.assertEquals(12L, invalidationInfos.get(MV_NAME_2).getInvalidationTime());
-    // Cleanup (current = 28, duration = 4) -> Removes txn9
-    removed = MaterializationsInvalidationCache.get().cleanup(24L);
-    Assert.assertEquals(0L, removed);
-  }
-
-  private static BasicTxnInfo createTxnInfo(String dbName, String tableName, int i) {
-    BasicTxnInfo r = new BasicTxnInfo();
-    r.setDbname(dbName);
-    r.setTablename(tableName);
-    r.setTxnid(i);
-    r.setTime(i);
-    return r;
-  }
-}
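
For context, the removed tests above exercised the old process-local MaterializationsInvalidationCache. With this change the staleness check becomes a single remote metastore call per materialized view, driven by its CreationMetadata and the caller's transaction snapshot (the updated Thrift signatures appear later in this commit). A minimal sketch of that call shape, assuming a connected ThriftHiveMetastore.Iface named "client"; the catalog/database/table names and txn-list strings below are illustrative only:

    import com.google.common.collect.ImmutableSet;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.Materialization;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class MaterializationCheckSketch {
      static Materialization check(ThriftHiveMetastore.Iface client) throws TException {
        CreationMetadata cm = new CreationMetadata(
            "hive", "default", "mv1",                             // catalog, db, materialized view
            ImmutableSet.of("default.tbl1", "default.tbl2"));     // tables the view reads
        cm.setValidTxnList("3:" + Long.MAX_VALUE + "::");         // snapshot the view was built against
        String currentTxnList = "10:" + Long.MAX_VALUE + "::";    // caller's current snapshot
        // One remote call per materialization replaces the shared in-memory cache lookup;
        // the returned Materialization describes whether the view is outdated for rewriting.
        return client.get_materialization_invalidation_info(cm, currentTxnList);
      }
    }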

http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index efa3e7c..816a735 100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@ -1211,6 +1211,7 @@ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
     // Update the metadata for the materialized view
     CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
     cm.addToTablesUsed(dbName + "." + tableNames[1]);
+    cm.unsetMaterializationTime();
     client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
 
     List<String> partNames = new ArrayList<>();


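The test change above also clears the new materializationTime field on CreationMetadata before the metadata is stored again. A hedged sketch of that flow against IMetaStoreClient, with all object names assumed for illustration:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.thrift.TException;

    class UpdateCreationMetadataSketch {
      static void addSourceTable(IMetaStoreClient client) throws TException {
        CreationMetadata cm =
            client.getTable("hive", "default", "mv1").getCreationMetadata();
        cm.addToTablesUsed("default.extra_table");   // the view now also reads this table
        // materializationTime is managed by the metastore; unset it before the update,
        // as the test change above does, presumably so a stale client-side value is
        // not written back.
        cm.unsetMaterializationTime();
        client.updateCreationMetadata("hive", "default", "mv1", cm);
      }
    }
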
[23/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index ec129ef..24ffadb 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -128,7 +128,7 @@ import org.slf4j.LoggerFactory;
 
     public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
 
-    public Map<String,Materialization> get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+    public Materialization get_materialization_invalidation_info(CreationMetadata creation_metadata, String validTxnList) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
 
     public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
 
@@ -548,7 +548,7 @@ import org.slf4j.LoggerFactory;
 
     public void get_table_objects_by_name_req(GetTablesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
-    public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+    public void get_materialization_invalidation_info(CreationMetadata creation_metadata, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
     public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
 
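The asynchronous interface changes in the same way. A minimal sketch of driving the new call through a callback, assuming an already constructed ThriftHiveMetastore.AsyncClient (transport and protocol setup omitted) and pre-built CreationMetadata/txn-list arguments:

    import org.apache.hadoop.hive.metastore.api.CreationMetadata;
    import org.apache.hadoop.hive.metastore.api.Materialization;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.TException;
    import org.apache.thrift.async.AsyncMethodCallback;

    class AsyncInvalidationCheckSketch {
      static void check(ThriftHiveMetastore.AsyncClient asyncClient,
          CreationMetadata cm, String currentTxnList) throws TException {
        asyncClient.get_materialization_invalidation_info(cm, currentTxnList,
            new AsyncMethodCallback<Materialization>() {
              @Override public void onComplete(Materialization m) {
                // inspect m to decide whether the materialized view can still be used
              }
              @Override public void onError(Exception e) {
                // surface the failure to the caller
              }
            });
      }
    }
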
@@ -2125,21 +2125,21 @@ import org.slf4j.LoggerFactory;
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result");
     }
 
-    public Map<String,Materialization> get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+    public Materialization get_materialization_invalidation_info(CreationMetadata creation_metadata, String validTxnList) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
     {
-      send_get_materialization_invalidation_info(dbname, tbl_names);
+      send_get_materialization_invalidation_info(creation_metadata, validTxnList);
       return recv_get_materialization_invalidation_info();
     }
 
-    public void send_get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws org.apache.thrift.TException
+    public void send_get_materialization_invalidation_info(CreationMetadata creation_metadata, String validTxnList) throws org.apache.thrift.TException
     {
       get_materialization_invalidation_info_args args = new get_materialization_invalidation_info_args();
-      args.setDbname(dbname);
-      args.setTbl_names(tbl_names);
+      args.setCreation_metadata(creation_metadata);
+      args.setValidTxnList(validTxnList);
       sendBase("get_materialization_invalidation_info", args);
     }
 
-    public Map<String,Materialization> recv_get_materialization_invalidation_info() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
+    public Materialization recv_get_materialization_invalidation_info() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException
     {
       get_materialization_invalidation_info_result result = new get_materialization_invalidation_info_result();
       receiveBase(result, "get_materialization_invalidation_info");
@@ -8346,32 +8346,32 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+    public void get_materialization_invalidation_info(CreationMetadata creation_metadata, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
       checkReady();
-      get_materialization_invalidation_info_call method_call = new get_materialization_invalidation_info_call(dbname, tbl_names, resultHandler, this, ___protocolFactory, ___transport);
+      get_materialization_invalidation_info_call method_call = new get_materialization_invalidation_info_call(creation_metadata, validTxnList, resultHandler, this, ___protocolFactory, ___transport);
       this.___currentMethod = method_call;
       ___manager.call(method_call);
     }
 
     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_call extends org.apache.thrift.async.TAsyncMethodCall {
-      private String dbname;
-      private List<String> tbl_names;
-      public get_materialization_invalidation_info_call(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+      private CreationMetadata creation_metadata;
+      private String validTxnList;
+      public get_materialization_invalidation_info_call(CreationMetadata creation_metadata, String validTxnList, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
         super(client, protocolFactory, transport, resultHandler, false);
-        this.dbname = dbname;
-        this.tbl_names = tbl_names;
+        this.creation_metadata = creation_metadata;
+        this.validTxnList = validTxnList;
       }
 
       public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
         prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_materialization_invalidation_info", org.apache.thrift.protocol.TMessageType.CALL, 0));
         get_materialization_invalidation_info_args args = new get_materialization_invalidation_info_args();
-        args.setDbname(dbname);
-        args.setTbl_names(tbl_names);
+        args.setCreation_metadata(creation_metadata);
+        args.setValidTxnList(validTxnList);
         args.write(prot);
         prot.writeMessageEnd();
       }
 
-      public Map<String,Materialization> getResult() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException {
+      public Materialization getResult() throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException {
         if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
           throw new IllegalStateException("Method call not finished!");
         }
@@ -15395,7 +15395,7 @@ import org.slf4j.LoggerFactory;
       public get_materialization_invalidation_info_result getResult(I iface, get_materialization_invalidation_info_args args) throws org.apache.thrift.TException {
         get_materialization_invalidation_info_result result = new get_materialization_invalidation_info_result();
         try {
-          result.success = iface.get_materialization_invalidation_info(args.dbname, args.tbl_names);
+          result.success = iface.get_materialization_invalidation_info(args.creation_metadata, args.validTxnList);
         } catch (MetaException o1) {
           result.o1 = o1;
         } catch (InvalidOperationException o2) {
@@ -22471,7 +22471,7 @@ import org.slf4j.LoggerFactory;
       }
     }
 
-    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_materialization_invalidation_info_args, Map<String,Materialization>> {
+    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_materialization_invalidation_info_args, Materialization> {
       public get_materialization_invalidation_info() {
         super("get_materialization_invalidation_info");
       }
@@ -22480,10 +22480,10 @@ import org.slf4j.LoggerFactory;
         return new get_materialization_invalidation_info_args();
       }
 
-      public AsyncMethodCallback<Map<String,Materialization>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
+      public AsyncMethodCallback<Materialization> getResultHandler(final AsyncFrameBuffer fb, final int seqid) {
         final org.apache.thrift.AsyncProcessFunction fcall = this;
-        return new AsyncMethodCallback<Map<String,Materialization>>() { 
-          public void onComplete(Map<String,Materialization> o) {
+        return new AsyncMethodCallback<Materialization>() { 
+          public void onComplete(Materialization o) {
             get_materialization_invalidation_info_result result = new get_materialization_invalidation_info_result();
             result.success = o;
             try {
@@ -22533,8 +22533,8 @@ import org.slf4j.LoggerFactory;
         return false;
       }
 
-      public void start(I iface, get_materialization_invalidation_info_args args, org.apache.thrift.async.AsyncMethodCallback<Map<String,Materialization>> resultHandler) throws TException {
-        iface.get_materialization_invalidation_info(args.dbname, args.tbl_names,resultHandler);
+      public void start(I iface, get_materialization_invalidation_info_args args, org.apache.thrift.async.AsyncMethodCallback<Materialization> resultHandler) throws TException {
+        iface.get_materialization_invalidation_info(args.creation_metadata, args.validTxnList,resultHandler);
       }
     }
 
@@ -42384,13 +42384,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list952.size);
-                  String _elem953;
-                  for (int _i954 = 0; _i954 < _list952.size; ++_i954)
+                  org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list944.size);
+                  String _elem945;
+                  for (int _i946 = 0; _i946 < _list944.size; ++_i946)
                   {
-                    _elem953 = iprot.readString();
-                    struct.success.add(_elem953);
+                    _elem945 = iprot.readString();
+                    struct.success.add(_elem945);
                   }
                   iprot.readListEnd();
                 }
@@ -42425,9 +42425,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter955 : struct.success)
+            for (String _iter947 : struct.success)
             {
-              oprot.writeString(_iter955);
+              oprot.writeString(_iter947);
             }
             oprot.writeListEnd();
           }
@@ -42466,9 +42466,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter956 : struct.success)
+            for (String _iter948 : struct.success)
             {
-              oprot.writeString(_iter956);
+              oprot.writeString(_iter948);
             }
           }
         }
@@ -42483,13 +42483,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list957.size);
-            String _elem958;
-            for (int _i959 = 0; _i959 < _list957.size; ++_i959)
+            org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list949.size);
+            String _elem950;
+            for (int _i951 = 0; _i951 < _list949.size; ++_i951)
             {
-              _elem958 = iprot.readString();
-              struct.success.add(_elem958);
+              _elem950 = iprot.readString();
+              struct.success.add(_elem950);
             }
           }
           struct.setSuccessIsSet(true);
@@ -43143,13 +43143,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list960.size);
-                  String _elem961;
-                  for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                  org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list952.size);
+                  String _elem953;
+                  for (int _i954 = 0; _i954 < _list952.size; ++_i954)
                   {
-                    _elem961 = iprot.readString();
-                    struct.success.add(_elem961);
+                    _elem953 = iprot.readString();
+                    struct.success.add(_elem953);
                   }
                   iprot.readListEnd();
                 }
@@ -43184,9 +43184,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter963 : struct.success)
+            for (String _iter955 : struct.success)
             {
-              oprot.writeString(_iter963);
+              oprot.writeString(_iter955);
             }
             oprot.writeListEnd();
           }
@@ -43225,9 +43225,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter964 : struct.success)
+            for (String _iter956 : struct.success)
             {
-              oprot.writeString(_iter964);
+              oprot.writeString(_iter956);
             }
           }
         }
@@ -43242,13 +43242,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list965.size);
-            String _elem966;
-            for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+            org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list957.size);
+            String _elem958;
+            for (int _i959 = 0; _i959 < _list957.size; ++_i959)
             {
-              _elem966 = iprot.readString();
-              struct.success.add(_elem966);
+              _elem958 = iprot.readString();
+              struct.success.add(_elem958);
             }
           }
           struct.setSuccessIsSet(true);
@@ -47855,16 +47855,16 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                 {
-                  org.apache.thrift.protocol.TMap _map968 = iprot.readMapBegin();
-                  struct.success = new HashMap<String,Type>(2*_map968.size);
-                  String _key969;
-                  Type _val970;
-                  for (int _i971 = 0; _i971 < _map968.size; ++_i971)
+                  org.apache.thrift.protocol.TMap _map960 = iprot.readMapBegin();
+                  struct.success = new HashMap<String,Type>(2*_map960.size);
+                  String _key961;
+                  Type _val962;
+                  for (int _i963 = 0; _i963 < _map960.size; ++_i963)
                   {
-                    _key969 = iprot.readString();
-                    _val970 = new Type();
-                    _val970.read(iprot);
-                    struct.success.put(_key969, _val970);
+                    _key961 = iprot.readString();
+                    _val962 = new Type();
+                    _val962.read(iprot);
+                    struct.success.put(_key961, _val962);
                   }
                   iprot.readMapEnd();
                 }
@@ -47899,10 +47899,10 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Map.Entry<String, Type> _iter972 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter964 : struct.success.entrySet())
             {
-              oprot.writeString(_iter972.getKey());
-              _iter972.getValue().write(oprot);
+              oprot.writeString(_iter964.getKey());
+              _iter964.getValue().write(oprot);
             }
             oprot.writeMapEnd();
           }
@@ -47941,10 +47941,10 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Map.Entry<String, Type> _iter973 : struct.success.entrySet())
+            for (Map.Entry<String, Type> _iter965 : struct.success.entrySet())
             {
-              oprot.writeString(_iter973.getKey());
-              _iter973.getValue().write(oprot);
+              oprot.writeString(_iter965.getKey());
+              _iter965.getValue().write(oprot);
             }
           }
         }
@@ -47959,16 +47959,16 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TMap _map974 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new HashMap<String,Type>(2*_map974.size);
-            String _key975;
-            Type _val976;
-            for (int _i977 = 0; _i977 < _map974.size; ++_i977)
+            org.apache.thrift.protocol.TMap _map966 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new HashMap<String,Type>(2*_map966.size);
+            String _key967;
+            Type _val968;
+            for (int _i969 = 0; _i969 < _map966.size; ++_i969)
             {
-              _key975 = iprot.readString();
-              _val976 = new Type();
-              _val976.read(iprot);
-              struct.success.put(_key975, _val976);
+              _key967 = iprot.readString();
+              _val968 = new Type();
+              _val968.read(iprot);
+              struct.success.put(_key967, _val968);
             }
           }
           struct.setSuccessIsSet(true);
@@ -49003,14 +49003,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list978.size);
-                  FieldSchema _elem979;
-                  for (int _i980 = 0; _i980 < _list978.size; ++_i980)
+                  org.apache.thrift.protocol.TList _list970 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list970.size);
+                  FieldSchema _elem971;
+                  for (int _i972 = 0; _i972 < _list970.size; ++_i972)
                   {
-                    _elem979 = new FieldSchema();
-                    _elem979.read(iprot);
-                    struct.success.add(_elem979);
+                    _elem971 = new FieldSchema();
+                    _elem971.read(iprot);
+                    struct.success.add(_elem971);
                   }
                   iprot.readListEnd();
                 }
@@ -49063,9 +49063,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter981 : struct.success)
+            for (FieldSchema _iter973 : struct.success)
             {
-              _iter981.write(oprot);
+              _iter973.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -49120,9 +49120,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter982 : struct.success)
+            for (FieldSchema _iter974 : struct.success)
             {
-              _iter982.write(oprot);
+              _iter974.write(oprot);
             }
           }
         }
@@ -49143,14 +49143,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list983.size);
-            FieldSchema _elem984;
-            for (int _i985 = 0; _i985 < _list983.size; ++_i985)
+            org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list975.size);
+            FieldSchema _elem976;
+            for (int _i977 = 0; _i977 < _list975.size; ++_i977)
             {
-              _elem984 = new FieldSchema();
-              _elem984.read(iprot);
-              struct.success.add(_elem984);
+              _elem976 = new FieldSchema();
+              _elem976.read(iprot);
+              struct.success.add(_elem976);
             }
           }
           struct.setSuccessIsSet(true);
@@ -50304,14 +50304,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list986.size);
-                  FieldSchema _elem987;
-                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
+                  org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list978.size);
+                  FieldSchema _elem979;
+                  for (int _i980 = 0; _i980 < _list978.size; ++_i980)
                   {
-                    _elem987 = new FieldSchema();
-                    _elem987.read(iprot);
-                    struct.success.add(_elem987);
+                    _elem979 = new FieldSchema();
+                    _elem979.read(iprot);
+                    struct.success.add(_elem979);
                   }
                   iprot.readListEnd();
                 }
@@ -50364,9 +50364,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter989 : struct.success)
+            for (FieldSchema _iter981 : struct.success)
             {
-              _iter989.write(oprot);
+              _iter981.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -50421,9 +50421,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter990 : struct.success)
+            for (FieldSchema _iter982 : struct.success)
             {
-              _iter990.write(oprot);
+              _iter982.write(oprot);
             }
           }
         }
@@ -50444,14 +50444,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list991.size);
-            FieldSchema _elem992;
-            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
+            org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list983.size);
+            FieldSchema _elem984;
+            for (int _i985 = 0; _i985 < _list983.size; ++_i985)
             {
-              _elem992 = new FieldSchema();
-              _elem992.read(iprot);
-              struct.success.add(_elem992);
+              _elem984 = new FieldSchema();
+              _elem984.read(iprot);
+              struct.success.add(_elem984);
             }
           }
           struct.setSuccessIsSet(true);
@@ -51496,14 +51496,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list994.size);
-                  FieldSchema _elem995;
-                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
+                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list986.size);
+                  FieldSchema _elem987;
+                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
                   {
-                    _elem995 = new FieldSchema();
-                    _elem995.read(iprot);
-                    struct.success.add(_elem995);
+                    _elem987 = new FieldSchema();
+                    _elem987.read(iprot);
+                    struct.success.add(_elem987);
                   }
                   iprot.readListEnd();
                 }
@@ -51556,9 +51556,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter997 : struct.success)
+            for (FieldSchema _iter989 : struct.success)
             {
-              _iter997.write(oprot);
+              _iter989.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -51613,9 +51613,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter998 : struct.success)
+            for (FieldSchema _iter990 : struct.success)
             {
-              _iter998.write(oprot);
+              _iter990.write(oprot);
             }
           }
         }
@@ -51636,14 +51636,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list999.size);
-            FieldSchema _elem1000;
-            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
+            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list991.size);
+            FieldSchema _elem992;
+            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
             {
-              _elem1000 = new FieldSchema();
-              _elem1000.read(iprot);
-              struct.success.add(_elem1000);
+              _elem992 = new FieldSchema();
+              _elem992.read(iprot);
+              struct.success.add(_elem992);
             }
           }
           struct.setSuccessIsSet(true);
@@ -52797,14 +52797,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
-                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
-                  FieldSchema _elem1003;
-                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
+                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
+                  struct.success = new ArrayList<FieldSchema>(_list994.size);
+                  FieldSchema _elem995;
+                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
                   {
-                    _elem1003 = new FieldSchema();
-                    _elem1003.read(iprot);
-                    struct.success.add(_elem1003);
+                    _elem995 = new FieldSchema();
+                    _elem995.read(iprot);
+                    struct.success.add(_elem995);
                   }
                   iprot.readListEnd();
                 }
@@ -52857,9 +52857,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (FieldSchema _iter1005 : struct.success)
+            for (FieldSchema _iter997 : struct.success)
             {
-              _iter1005.write(oprot);
+              _iter997.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -52914,9 +52914,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (FieldSchema _iter1006 : struct.success)
+            for (FieldSchema _iter998 : struct.success)
             {
-              _iter1006.write(oprot);
+              _iter998.write(oprot);
             }
           }
         }
@@ -52937,14 +52937,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(4);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<FieldSchema>(_list1007.size);
-            FieldSchema _elem1008;
-            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
+            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<FieldSchema>(_list999.size);
+            FieldSchema _elem1000;
+            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
             {
-              _elem1008 = new FieldSchema();
-              _elem1008.read(iprot);
-              struct.success.add(_elem1008);
+              _elem1000 = new FieldSchema();
+              _elem1000.read(iprot);
+              struct.success.add(_elem1000);
             }
           }
           struct.setSuccessIsSet(true);
@@ -56073,14 +56073,14 @@ import org.slf4j.LoggerFactory;
             case 2: // PRIMARY_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
-                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1010.size);
-                  SQLPrimaryKey _elem1011;
-                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
+                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
+                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1002.size);
+                  SQLPrimaryKey _elem1003;
+                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
                   {
-                    _elem1011 = new SQLPrimaryKey();
-                    _elem1011.read(iprot);
-                    struct.primaryKeys.add(_elem1011);
+                    _elem1003 = new SQLPrimaryKey();
+                    _elem1003.read(iprot);
+                    struct.primaryKeys.add(_elem1003);
                   }
                   iprot.readListEnd();
                 }
@@ -56092,14 +56092,14 @@ import org.slf4j.LoggerFactory;
             case 3: // FOREIGN_KEYS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1013 = iprot.readListBegin();
-                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1013.size);
-                  SQLForeignKey _elem1014;
-                  for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015)
+                  org.apache.thrift.protocol.TList _list1005 = iprot.readListBegin();
+                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1005.size);
+                  SQLForeignKey _elem1006;
+                  for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007)
                   {
-                    _elem1014 = new SQLForeignKey();
-                    _elem1014.read(iprot);
-                    struct.foreignKeys.add(_elem1014);
+                    _elem1006 = new SQLForeignKey();
+                    _elem1006.read(iprot);
+                    struct.foreignKeys.add(_elem1006);
                   }
                   iprot.readListEnd();
                 }
@@ -56111,14 +56111,14 @@ import org.slf4j.LoggerFactory;
             case 4: // UNIQUE_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin();
-                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1016.size);
-                  SQLUniqueConstraint _elem1017;
-                  for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018)
+                  org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin();
+                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1008.size);
+                  SQLUniqueConstraint _elem1009;
+                  for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010)
                   {
-                    _elem1017 = new SQLUniqueConstraint();
-                    _elem1017.read(iprot);
-                    struct.uniqueConstraints.add(_elem1017);
+                    _elem1009 = new SQLUniqueConstraint();
+                    _elem1009.read(iprot);
+                    struct.uniqueConstraints.add(_elem1009);
                   }
                   iprot.readListEnd();
                 }
@@ -56130,14 +56130,14 @@ import org.slf4j.LoggerFactory;
             case 5: // NOT_NULL_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1019 = iprot.readListBegin();
-                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1019.size);
-                  SQLNotNullConstraint _elem1020;
-                  for (int _i1021 = 0; _i1021 < _list1019.size; ++_i1021)
+                  org.apache.thrift.protocol.TList _list1011 = iprot.readListBegin();
+                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1011.size);
+                  SQLNotNullConstraint _elem1012;
+                  for (int _i1013 = 0; _i1013 < _list1011.size; ++_i1013)
                   {
-                    _elem1020 = new SQLNotNullConstraint();
-                    _elem1020.read(iprot);
-                    struct.notNullConstraints.add(_elem1020);
+                    _elem1012 = new SQLNotNullConstraint();
+                    _elem1012.read(iprot);
+                    struct.notNullConstraints.add(_elem1012);
                   }
                   iprot.readListEnd();
                 }
@@ -56149,14 +56149,14 @@ import org.slf4j.LoggerFactory;
             case 6: // DEFAULT_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1022 = iprot.readListBegin();
-                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1022.size);
-                  SQLDefaultConstraint _elem1023;
-                  for (int _i1024 = 0; _i1024 < _list1022.size; ++_i1024)
+                  org.apache.thrift.protocol.TList _list1014 = iprot.readListBegin();
+                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1014.size);
+                  SQLDefaultConstraint _elem1015;
+                  for (int _i1016 = 0; _i1016 < _list1014.size; ++_i1016)
                   {
-                    _elem1023 = new SQLDefaultConstraint();
-                    _elem1023.read(iprot);
-                    struct.defaultConstraints.add(_elem1023);
+                    _elem1015 = new SQLDefaultConstraint();
+                    _elem1015.read(iprot);
+                    struct.defaultConstraints.add(_elem1015);
                   }
                   iprot.readListEnd();
                 }
@@ -56168,14 +56168,14 @@ import org.slf4j.LoggerFactory;
             case 7: // CHECK_CONSTRAINTS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1025 = iprot.readListBegin();
-                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1025.size);
-                  SQLCheckConstraint _elem1026;
-                  for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027)
+                  org.apache.thrift.protocol.TList _list1017 = iprot.readListBegin();
+                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1017.size);
+                  SQLCheckConstraint _elem1018;
+                  for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019)
                   {
-                    _elem1026 = new SQLCheckConstraint();
-                    _elem1026.read(iprot);
-                    struct.checkConstraints.add(_elem1026);
+                    _elem1018 = new SQLCheckConstraint();
+                    _elem1018.read(iprot);
+                    struct.checkConstraints.add(_elem1018);
                   }
                   iprot.readListEnd();
                 }
@@ -56206,9 +56206,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-            for (SQLPrimaryKey _iter1028 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter1020 : struct.primaryKeys)
             {
-              _iter1028.write(oprot);
+              _iter1020.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56218,9 +56218,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-            for (SQLForeignKey _iter1029 : struct.foreignKeys)
+            for (SQLForeignKey _iter1021 : struct.foreignKeys)
             {
-              _iter1029.write(oprot);
+              _iter1021.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56230,9 +56230,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
-            for (SQLUniqueConstraint _iter1030 : struct.uniqueConstraints)
+            for (SQLUniqueConstraint _iter1022 : struct.uniqueConstraints)
             {
-              _iter1030.write(oprot);
+              _iter1022.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56242,9 +56242,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
-            for (SQLNotNullConstraint _iter1031 : struct.notNullConstraints)
+            for (SQLNotNullConstraint _iter1023 : struct.notNullConstraints)
             {
-              _iter1031.write(oprot);
+              _iter1023.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56254,9 +56254,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
-            for (SQLDefaultConstraint _iter1032 : struct.defaultConstraints)
+            for (SQLDefaultConstraint _iter1024 : struct.defaultConstraints)
             {
-              _iter1032.write(oprot);
+              _iter1024.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56266,9 +56266,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
-            for (SQLCheckConstraint _iter1033 : struct.checkConstraints)
+            for (SQLCheckConstraint _iter1025 : struct.checkConstraints)
             {
-              _iter1033.write(oprot);
+              _iter1025.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -56320,54 +56320,54 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetPrimaryKeys()) {
           {
             oprot.writeI32(struct.primaryKeys.size());
-            for (SQLPrimaryKey _iter1034 : struct.primaryKeys)
+            for (SQLPrimaryKey _iter1026 : struct.primaryKeys)
             {
-              _iter1034.write(oprot);
+              _iter1026.write(oprot);
             }
           }
         }
         if (struct.isSetForeignKeys()) {
           {
             oprot.writeI32(struct.foreignKeys.size());
-            for (SQLForeignKey _iter1035 : struct.foreignKeys)
+            for (SQLForeignKey _iter1027 : struct.foreignKeys)
             {
-              _iter1035.write(oprot);
+              _iter1027.write(oprot);
             }
           }
         }
         if (struct.isSetUniqueConstraints()) {
           {
             oprot.writeI32(struct.uniqueConstraints.size());
-            for (SQLUniqueConstraint _iter1036 : struct.uniqueConstraints)
+            for (SQLUniqueConstraint _iter1028 : struct.uniqueConstraints)
             {
-              _iter1036.write(oprot);
+              _iter1028.write(oprot);
             }
           }
         }
         if (struct.isSetNotNullConstraints()) {
           {
             oprot.writeI32(struct.notNullConstraints.size());
-            for (SQLNotNullConstraint _iter1037 : struct.notNullConstraints)
+            for (SQLNotNullConstraint _iter1029 : struct.notNullConstraints)
             {
-              _iter1037.write(oprot);
+              _iter1029.write(oprot);
             }
           }
         }
         if (struct.isSetDefaultConstraints()) {
           {
             oprot.writeI32(struct.defaultConstraints.size());
-            for (SQLDefaultConstraint _iter1038 : struct.defaultConstraints)
+            for (SQLDefaultConstraint _iter1030 : struct.defaultConstraints)
             {
-              _iter1038.write(oprot);
+              _iter1030.write(oprot);
             }
           }
         }
         if (struct.isSetCheckConstraints()) {
           {
             oprot.writeI32(struct.checkConstraints.size());
-            for (SQLCheckConstraint _iter1039 : struct.checkConstraints)
+            for (SQLCheckConstraint _iter1031 : struct.checkConstraints)
             {
-              _iter1039.write(oprot);
+              _iter1031.write(oprot);
             }
           }
         }
@@ -56384,84 +56384,84 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list1040 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1040.size);
-            SQLPrimaryKey _elem1041;
-            for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042)
+            org.apache.thrift.protocol.TList _list1032 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1032.size);
+            SQLPrimaryKey _elem1033;
+            for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
             {
-              _elem1041 = new SQLPrimaryKey();
-              _elem1041.read(iprot);
-              struct.primaryKeys.add(_elem1041);
+              _elem1033 = new SQLPrimaryKey();
+              _elem1033.read(iprot);
+              struct.primaryKeys.add(_elem1033);
             }
           }
           struct.setPrimaryKeysIsSet(true);
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1043 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1043.size);
-            SQLForeignKey _elem1044;
-            for (int _i1045 = 0; _i1045 < _list1043.size; ++_i1045)
+            org.apache.thrift.protocol.TList _list1035 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1035.size);
+            SQLForeignKey _elem1036;
+            for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037)
             {
-              _elem1044 = new SQLForeignKey();
-              _elem1044.read(iprot);
-              struct.foreignKeys.add(_elem1044);
+              _elem1036 = new SQLForeignKey();
+              _elem1036.read(iprot);
+              struct.foreignKeys.add(_elem1036);
             }
           }
           struct.setForeignKeysIsSet(true);
         }
         if (incoming.get(3)) {
           {
-            org.apache.thrift.protocol.TList _list1046 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1046.size);
-            SQLUniqueConstraint _elem1047;
-            for (int _i1048 = 0; _i1048 < _list1046.size; ++_i1048)
+            org.apache.thrift.protocol.TList _list1038 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1038.size);
+            SQLUniqueConstraint _elem1039;
+            for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040)
             {
-              _elem1047 = new SQLUniqueConstraint();
-              _elem1047.read(iprot);
-              struct.uniqueConstraints.add(_elem1047);
+              _elem1039 = new SQLUniqueConstraint();
+              _elem1039.read(iprot);
+              struct.uniqueConstraints.add(_elem1039);
             }
           }
           struct.setUniqueConstraintsIsSet(true);
         }
         if (incoming.get(4)) {
           {
-            org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1049.size);
-            SQLNotNullConstraint _elem1050;
-            for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051)
+            org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1041.size);
+            SQLNotNullConstraint _elem1042;
+            for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043)
             {
-              _elem1050 = new SQLNotNullConstraint();
-              _elem1050.read(iprot);
-              struct.notNullConstraints.add(_elem1050);
+              _elem1042 = new SQLNotNullConstraint();
+              _elem1042.read(iprot);
+              struct.notNullConstraints.add(_elem1042);
             }
           }
           struct.setNotNullConstraintsIsSet(true);
         }
         if (incoming.get(5)) {
           {
-            org.apache.thrift.protocol.TList _list1052 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1052.size);
-            SQLDefaultConstraint _elem1053;
-            for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054)
+            org.apache.thrift.protocol.TList _list1044 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1044.size);
+            SQLDefaultConstraint _elem1045;
+            for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046)
             {
-              _elem1053 = new SQLDefaultConstraint();
-              _elem1053.read(iprot);
-              struct.defaultConstraints.add(_elem1053);
+              _elem1045 = new SQLDefaultConstraint();
+              _elem1045.read(iprot);
+              struct.defaultConstraints.add(_elem1045);
             }
           }
           struct.setDefaultConstraintsIsSet(true);
         }
         if (incoming.get(6)) {
           {
-            org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1055.size);
-            SQLCheckConstraint _elem1056;
-            for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
+            org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1047.size);
+            SQLCheckConstraint _elem1048;
+            for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
             {
-              _elem1056 = new SQLCheckConstraint();
-              _elem1056.read(iprot);
-              struct.checkConstraints.add(_elem1056);
+              _elem1048 = new SQLCheckConstraint();
+              _elem1048.read(iprot);
+              struct.checkConstraints.add(_elem1048);
             }
           }
           struct.setCheckConstraintsIsSet(true);
@@ -65611,13 +65611,13 @@ import org.slf4j.LoggerFactory;
             case 3: // PART_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
-                  struct.partNames = new ArrayList<String>(_list1058.size);
-                  String _elem1059;
-                  for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
+                  org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
+                  struct.partNames = new ArrayList<String>(_list1050.size);
+                  String _elem1051;
+                  for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
                   {
-                    _elem1059 = iprot.readString();
-                    struct.partNames.add(_elem1059);
+                    _elem1051 = iprot.readString();
+                    struct.partNames.add(_elem1051);
                   }
                   iprot.readListEnd();
                 }
@@ -65653,9 +65653,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-            for (String _iter1061 : struct.partNames)
+            for (String _iter1053 : struct.partNames)
             {
-              oprot.writeString(_iter1061);
+              oprot.writeString(_iter1053);
             }
             oprot.writeListEnd();
           }
@@ -65698,9 +65698,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetPartNames()) {
           {
             oprot.writeI32(struct.partNames.size());
-            for (String _iter1062 : struct.partNames)
+            for (String _iter1054 : struct.partNames)
             {
-              oprot.writeString(_iter1062);
+              oprot.writeString(_iter1054);
             }
           }
         }
@@ -65720,13 +65720,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.partNames = new ArrayList<String>(_list1063.size);
-            String _elem1064;
-            for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
+            org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.partNames = new ArrayList<String>(_list1055.size);
+            String _elem1056;
+            for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
             {
-              _elem1064 = iprot.readString();
-              struct.partNames.add(_elem1064);
+              _elem1056 = iprot.readString();
+              struct.partNames.add(_elem1056);
             }
           }
           struct.setPartNamesIsSet(true);
@@ -66951,13 +66951,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1066.size);
-                  String _elem1067;
-                  for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
+                  org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1058.size);
+                  String _elem1059;
+                  for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
                   {
-                    _elem1067 = iprot.readString();
-                    struct.success.add(_elem1067);
+                    _elem1059 = iprot.readString();
+                    struct.success.add(_elem1059);
                   }
                   iprot.readListEnd();
                 }
@@ -66992,9 +66992,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1069 : struct.success)
+            for (String _iter1061 : struct.success)
             {
-              oprot.writeString(_iter1069);
+              oprot.writeString(_iter1061);
             }
             oprot.writeListEnd();
           }
@@ -67033,9 +67033,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1070 : struct.success)
+            for (String _iter1062 : struct.success)
             {
-              oprot.writeString(_iter1070);
+              oprot.writeString(_iter1062);
             }
           }
         }
@@ -67050,13 +67050,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1071.size);
-            String _elem1072;
-            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
+            org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1063.size);
+            String _elem1064;
+            for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
             {
-              _elem1072 = iprot.readString();
-              struct.success.add(_elem1072);
+              _elem1064 = iprot.readString();
+              struct.success.add(_elem1064);
             }
           }
           struct.setSuccessIsSet(true);
@@ -68030,13 +68030,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1074.size);
-                  String _elem1075;
-                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
+                  org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1066.size);
+                  String _elem1067;
+                  for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
                   {
-                    _elem1075 = iprot.readString();
-                    struct.success.add(_elem1075);
+                    _elem1067 = iprot.readString();
+                    struct.success.add(_elem1067);
                   }
                   iprot.readListEnd();
                 }
@@ -68071,9 +68071,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1077 : struct.success)
+            for (String _iter1069 : struct.success)
             {
-              oprot.writeString(_iter1077);
+              oprot.writeString(_iter1069);
             }
             oprot.writeListEnd();
           }
@@ -68112,9 +68112,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1078 : struct.success)
+            for (String _iter1070 : struct.success)
             {
-              oprot.writeString(_iter1078);
+              oprot.writeString(_iter1070);
             }
           }
         }
@@ -68129,13 +68129,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1079.size);
-            String _elem1080;
-            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
+            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1071.size);
+            String _elem1072;
+            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
             {
-              _elem1080 = iprot.readString();
-              struct.success.add(_elem1080);
+              _elem1072 = iprot.readString();
+              struct.success.add(_elem1072);
             }
           }
           struct.setSuccessIsSet(true);
@@ -68901,13 +68901,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1082.size);
-                  String _elem1083;
-                  for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
+                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1074.size);
+                  String _elem1075;
+                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
                   {
-                    _elem1083 = iprot.readString();
-                    struct.success.add(_elem1083);
+                    _elem1075 = iprot.readString();
+                    struct.success.add(_elem1075);
                   }
                   iprot.readListEnd();
                 }
@@ -68942,9 +68942,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1085 : struct.success)
+            for (String _iter1077 : struct.success)
             {
-              oprot.writeString(_iter1085);
+              oprot.writeString(_iter1077);
             }
             oprot.writeListEnd();
           }
@@ -68983,9 +68983,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1086 : struct.success)
+            for (String _iter1078 : struct.success)
             {
-              oprot.writeString(_iter1086);
+              oprot.writeString(_iter1078);
             }
           }
         }
@@ -69000,13 +69000,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1087.size);
-            String _elem1088;
-            for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
+            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1079.size);
+            String _elem1080;
+            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
             {
-              _elem1088 = iprot.readString();
-              struct.success.add(_elem1088);
+              _elem1080 = iprot.readString();
+              struct.success.add(_elem1080);
             }
           }
           struct.setSuccessIsSet(true);
@@ -69511,13 +69511,13 @@ import org.slf4j.LoggerFactory;
             case 3: // TBL_TYPES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
-                  struct.tbl_types = new ArrayList<String>(_list1090.size);
-                  String _elem1091;
-                  for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
+                  org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
+                  struct.tbl_types = new ArrayList<String>(_list1082.size);
+                  String _elem1083;
+                  for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
                   {
-                    _elem1091 = iprot.readString();
-                    struct.tbl_types.add(_elem1091);
+                    _elem1083 = iprot.readString();
+                    struct.tbl_types.add(_elem1083);
                   }
                   iprot.readListEnd();
                 }
@@ -69553,9 +69553,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-            for (String _iter1093 : struct.tbl_types)
+            for (String _iter1085 : struct.tbl_types)
             {
-              oprot.writeString(_iter1093);
+              oprot.writeString(_iter1085);
             }
             oprot.writeListEnd();
           }
@@ -69598,9 +69598,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetTbl_types()) {
           {
             oprot.writeI32(struct.tbl_types.size());
-            for (String _iter1094 : struct.tbl_types)
+            for (String _iter1086 : struct.tbl_types)
             {
-              oprot.writeString(_iter1094);
+              oprot.writeString(_iter1086);
             }
           }
         }
@@ -69620,13 +69620,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(2)) {
           {
-            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_types = new ArrayList<String>(_list1095.size);
-            String _elem1096;
-            for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
+            org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_types = new ArrayList<String>(_list1087.size);
+            String _elem1088;
+            for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
             {
-              _elem1096 = iprot.readString();
-              struct.tbl_types.add(_elem1096);
+              _elem1088 = iprot.readString();
+              struct.tbl_types.add(_elem1088);
             }
           }
           struct.setTbl_typesIsSet(true);
@@ -70032,14 +70032,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
-                  struct.success = new ArrayList<TableMeta>(_list1098.size);
-                  TableMeta _elem1099;
-                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
+                  org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
+                  struct.success = new ArrayList<TableMeta>(_list1090.size);
+                  TableMeta _elem1091;
+                  for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
                   {
-                    _elem1099 = new TableMeta();
-                    _elem1099.read(iprot);
-                    struct.success.add(_elem1099);
+                    _elem1091 = new TableMeta();
+                    _elem1091.read(iprot);
+                    struct.success.add(_elem1091);
                   }
                   iprot.readListEnd();
                 }
@@ -70074,9 +70074,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (TableMeta _iter1101 : struct.success)
+            for (TableMeta _iter1093 : struct.success)
             {
-              _iter1101.write(oprot);
+              _iter1093.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -70115,9 +70115,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (TableMeta _iter1102 : struct.success)
+            for (TableMeta _iter1094 : struct.success)
             {
-              _iter1102.write(oprot);
+              _iter1094.write(oprot);
             }
           }
         }
@@ -70132,14 +70132,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<TableMeta>(_list1103.size);
-            TableMeta _elem1104;
-            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
+            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<TableMeta>(_list1095.size);
+            TableMeta _elem1096;
+            for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
             {
-              _elem1104 = new TableMeta();
-              _elem1104.read(iprot);
-              struct.success.add(_elem1104);
+              _elem1096 = new TableMeta();
+              _elem1096.read(iprot);
+              struct.success.add(_elem1096);
             }
           }
           struct.setSuccessIsSet(true);
@@ -70905,13 +70905,13 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
-                  struct.success = new ArrayList<String>(_list1106.size);
-                  String _elem1107;
-                  for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
+                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
+                  struct.success = new ArrayList<String>(_list1098.size);
+                  String _elem1099;
+                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
                   {
-                    _elem1107 = iprot.readString();
-                    struct.success.add(_elem1107);
+                    _elem1099 = iprot.readString();
+                    struct.success.add(_elem1099);
                   }
                   iprot.readListEnd();
                 }
@@ -70946,9 +70946,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-            for (String _iter1109 : struct.success)
+            for (String _iter1101 : struct.success)
             {
-              oprot.writeString(_iter1109);
+              oprot.writeString(_iter1101);
             }
             oprot.writeListEnd();
           }
@@ -70987,9 +70987,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (String _iter1110 : struct.success)
+            for (String _iter1102 : struct.success)
             {
-              oprot.writeString(_iter1110);
+              oprot.writeString(_iter1102);
             }
           }
         }
@@ -71004,13 +71004,13 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(2);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.success = new ArrayList<String>(_list1111.size);
-            String _elem1112;
-            for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
+            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.success = new ArrayList<String>(_list1103.size);
+            String _elem1104;
+            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
             {
-              _elem1112 = iprot.readString();
-              struct.success.add(_elem1112);
+              _elem1104 = iprot.readString();
+              struct.success.add(_elem1104);
             }
           }
           struct.setSuccessIsSet(true);
@@ -72463,13 +72463,13 @@ import org.slf4j.LoggerFactory;
             case 2: // TBL_NAMES
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
-                  struct.tbl_names = new ArrayList<String>(_list1114.size);
-                  String _elem1115;
-                  for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
+                  org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
+                  struct.tbl_names = new ArrayList<String>(_list1106.size);
+                  String _elem1107;
+                  for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
                   {
-                    _elem1115 = iprot.readString();
-                    struct.tbl_names.add(_elem1115);
+                    _elem1107 = iprot.readString();
+                    struct.tbl_names.add(_elem1107);
                   }
                   iprot.readListEnd();
                 }
@@ -72500,9 +72500,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size()));
-            for (String _iter1117 : struct.tbl_names)
+            for (String _iter1109 : struct.tbl_names)
             {
-              oprot.writeString(_iter1117);
+              oprot.writeString(_iter1109);
             }
             oprot.writeListEnd();
           }
@@ -72539,9 +72539,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetTbl_names()) {
           {
             oprot.writeI32(struct.tbl_names.size());
-            for (String _iter1118 : struct.tbl_names)
+            for (String _iter1110 : struct.tbl_names)
             {
-              oprot.writeString(_iter1118);
+              oprot.writeString(_iter1110);
             }
           }
         }
@@ -72557,13 +72557,13 @@ import org.slf4j.LoggerFactory;
         }
         if (incoming.get(1)) {
           {
-            org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-            struct.tbl_names = new ArrayList<String>(_list1119.size);
-            String _elem1120;
-            for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
+            org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+            struct.tbl_names = new ArrayList<String>(_list1111.size);
+            String _elem1112;
+            for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
             {
-              _elem1120 = iprot.readString();
-              struct.tbl_names.add(_elem1120);
+              _elem1112 = iprot.readString();
+              struct.tbl_names.add(_elem1112);
             }
           }
           struct.setTbl_namesIsSet(true);
@@ -72888,14 +72888,14 @@ import org.slf4j.LoggerFactory;
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                 {
-                  org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
-                  struct.success = new ArrayList<Table>(_list1122.size);
-                  Table _elem1123;
-                  for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
+                  org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
+                  struct.success = new ArrayList<Table>(_list1114.size);
+                  Table _elem1115;
+                  for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
                   {
-                    _elem1123 = new Table();
-                    _elem1123.read(iprot);
-                    struct.success.add(_elem1123);
+                    _elem1115 = new Table();
+                    _elem1115.read(iprot);
+                    struct.success.add(_elem1115);
                   }
                   iprot.readListEnd();
                 }
@@ -72921,9 +72921,9 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
           {
             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-            for (Table _iter1125 : struct.success)
+            for (Table _iter1117 : struct.success)
             {
-              _iter1125.write(oprot);
+              _iter1117.write(oprot);
             }
             oprot.writeListEnd();
           }
@@ -72954,9 +72954,9 @@ import org.slf4j.LoggerFactory;
         if (struct.isSetSuccess()) {
           {
             oprot.writeI32(struct.success.size());
-            for (Table _iter1126 : struct.success)
+            for (Table _iter1118 : struct.success)
             {
-              _iter1126.write(oprot);
+              _iter1118.write(oprot);
             }
           }
         }
@@ -72968,14 +72968,14 @@ import org.slf4j.LoggerFactory;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
           {
-            org.apache.thrift.protocol.TList _list1127 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-            struct.success = new ArrayList<Table>(_list1127.size);
-            Table _elem1128;
-            for (int _i1129 = 0; _i1129 < _list1127.size; ++_i1129)
+            org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            struct.success = new ArrayList<Table>(_list1119.size);
+            Table _elem1120;
+            for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
             {
-              _elem1128 = new Table();
-              _elem1128.read(iprot);
-              struct.success.add(_elem1128);
+              _elem1120 = new Table();
+              _elem1120.read(iprot);
+              struct.success.add(_elem1120);
             }
           }
           struct.setSuccessIsSet(true);
@@ -74970,8 +74970,8 @@ import org.slf4j.LoggerFactory;
   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_materialization_invalidation_info_args implements org.apache.thrift.TBase<get_materialization_invalidation_info_args, get_materialization_invalidation_info_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_materialization_invalidation_info_args>   {
     private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_materialization_invalidation_info_args");
 
-    private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
-    private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_names", org.apache.thrift.protocol.TType.LIST, (short)2);
+    private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+    private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
@@ -74979,13 +74979,13 @@ import org.slf4j.LoggerFactory;
       schemes.put(TupleScheme.class, new get_materialization_invalidation_info_argsTupleSchemeFactory());
     }
 
-    private String dbname; // required
-    private List<String> tbl_names; // required
+    private CreationMetadata creation_metadata; // required
+    private String validTxnList; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      DBNAME((short)1, "dbname"),
-      TBL_NAMES((short)2, "tbl_names");
+      CREATION_METADATA((short)1, "creation_metadata"),
+      VALID_TXN_LIST((short)2, "validTxnList");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -75000,10 +75000,10 @@ import org.slf4j.LoggerFactory;
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 1: // DBNAME
-            return DBNAME;
-          case 2: // TBL_NAMES
-            return TBL_NAMES;
+          case 1: // CREATION_METADATA
+            return CREATION_METADATA;
+          case 2: // VALID_TXN_LIST
+            return VALID_TXN_LIST;
           default:
             return null;
         }
@@ -75047,11 +75047,10 @@ import org.slf4j.LoggerFactory;
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+      tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creation_metadata", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CreationMetadata.class)));
+      tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.DEFAULT, 
           new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-      tmpMap.put(_Fields.TBL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tbl_names", org.apache.thrift.TFieldRequirementType.DEFAULT, 
-          new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
-              new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
       org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_materialization_invalidation_info_args.class, metaDataMap);
     }
@@ -75060,24 +75059,23 @@ import org.slf4j.LoggerFactory;
     }
 
     public get_materialization_invalidation_info_args(
-      String dbname,
-      List<String> tbl_names)
+      CreationMetadata creation_metadata,
+      String validTxnList)
     {
       this();
-      this.dbname = dbname;
-      this.tbl_names = tbl_names;
+      this.creation_metadata = creation_metadata;
+      this.validTxnList = validTxnList;
     }
 
     /**
      * Performs a deep copy on <i>other</i>.
      */
     public get_materialization_invalidation_info_args(get_materialization_invalidation_info_args other) {
-      if (other.isSetDbname()) {
-        this.dbname = other.dbname;
+      if (other.isSetCreation_metadata()) {
+        this.creation_metadata = new CreationMetadata(other.creation_metadata);
       }
-      if (other.isSetTbl_names()) {
-        List<String> __this__tbl_names = new ArrayList<String>(other.tbl_names);
-        this.tbl_names = __this__tbl_names;
+      if (other.isSetValidTxnList()) {
+        this.validTxnList = other.validTxnList;
       }
     }
 
@@ -75087,86 +75085,71 @@ import org.slf4j.LoggerFactory;
 
     @Override
     public void clear() {
-      this.dbname = null;
-      this.tbl_names = null;
+      this.creation_metadata = null;
+      this.validTxnList = null;
     }
 
-    public String getDbname() {
-      return this.dbname;
+    public CreationMetadata getCreation_metadata() {
+      return this.creation_metadata;
     }
 
-    public void setDbname(String dbname) {
-      this.dbname = dbname;
+    public void setCreation_metadata(CreationMetadata creation_metadata) {
+      this.creation_metadata = creation_metadata;
     }
 
-    public void unsetDbname() {
-      this.dbname = null;
+    public void unsetCreation_metadata() {
+      this.creation_metadata = null;
     }
 
-    /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
-    public boolean isSetDbname() {
-      return this.dbname != null;
+    /** Returns true if field creation_metadata is set (has been assigned a value) and false otherwise */
+    public boolean isSetCreation_metadata() {
+      return this.creation_metadata != null;
     }
 
-    public void setDbnameIsSet(boolean value) {
+    public void setCreation_metadataIsSet(boolean value) {
       if (!value) {
-        this.dbname = null;
+        this.creation_metadata = null;
       }
     }
 
-    public int getTbl_namesSize() {
-      return (this.tbl_names == null) ? 0 : this.tbl_names.size();
-    }
-
-    public java.util.Iterator<String> getTbl_namesIterator() {
-      return (this.tbl_names == null) ? null : this.tbl_names.iterator();
-    }
-
-    public void addToTbl_names(String elem) {
-      if (this.tbl_names == null) {
-        this.tbl_names = new ArrayList<String>();
-      }
-      this.tbl_names.add(elem);
+    public String getValidTxnList() {
+      return this.validTxnList;
     }
 
-    public List<String> getTbl_names() {
-      return this.tbl_names;
+    public void

<TRUNCATED>
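
[Editor's note] The Java diff above reworks the get_materialization_invalidation_info arguments from a (dbname, tbl_names) pair to a (creation_metadata, validTxnList) pair. The following is a minimal sketch, not code from the commit, of how a caller could populate the new args struct; the CreationMetadata setter names are assumed from the usual Thrift bean conventions, and the database, table, and transaction-list values are placeholders.

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.get_materialization_invalidation_info_args;

public class InvalidationInfoArgsSketch {
  public static get_materialization_invalidation_info_args buildArgs() {
    // Metadata recorded when the materialized view was created; catalog name and
    // any other required fields are omitted in this sketch for brevity.
    CreationMetadata cm = new CreationMetadata();
    cm.setDbName("default");                                // placeholder database
    cm.setTblName("mv_example");                            // placeholder view name
    cm.setTablesUsed(Collections.singleton("default.src")); // tables the view reads
    cm.setValidTxnList("<txn-list-at-creation>");           // snapshot stored at creation time

    // The RPC now ships the view's creation metadata plus the caller's current
    // valid transaction list instead of the old (dbname, tbl_names) pair.
    return new get_materialization_invalidation_info_args(cm, "<current-txn-list>");
  }
}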

[25/48] hive git commit: HIVE-20006: Make materializations invalidation cache work with multiple active remote metastores (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/1b5903b0/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
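[Editor's note] The C++ diff below adds an optional materializationTime field to CreationMetadata and trims Materialization down to a single required sourceTablesUpdateDeleteModified flag. As a hedged illustration only, the Java-side accessors for the reshaped structs would look roughly as follows; the getter names are assumed from standard Thrift code generation, not quoted from this patch.

import org.apache.hadoop.hive.metastore.api.CreationMetadata;
import org.apache.hadoop.hive.metastore.api.Materialization;

public class MaterializationStructsSketch {
  public static void describe(CreationMetadata cm, Materialization m) {
    // New optional field: when the materialization was created or last rebuilt.
    if (cm.isSetMaterializationTime()) {
      System.out.println("materialization time = " + cm.getMaterializationTime());
    }
    // Materialization now only reports whether any source table saw an update or
    // delete since the view was materialized; tablesUsed, validTxnList and
    // invalidationTime no longer live in this struct.
    System.out.println("source tables modified = " + m.isSourceTablesUpdateDeleteModified());
  }
}
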
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 7f06b3b..26420dd 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -21220,6 +21220,11 @@ void CreationMetadata::__set_validTxnList(const std::string& val) {
 __isset.validTxnList = true;
 }
 
+void CreationMetadata::__set_materializationTime(const int64_t val) {
+  this->materializationTime = val;
+__isset.materializationTime = true;
+}
+
 uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -21298,6 +21303,14 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->materializationTime);
+          this->__isset.materializationTime = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -21352,6 +21365,11 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c
     xfer += oprot->writeString(this->validTxnList);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.materializationTime) {
+    xfer += oprot->writeFieldBegin("materializationTime", ::apache::thrift::protocol::T_I64, 6);
+    xfer += oprot->writeI64(this->materializationTime);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -21364,6 +21382,7 @@ void swap(CreationMetadata &a, CreationMetadata &b) {
   swap(a.tblName, b.tblName);
   swap(a.tablesUsed, b.tablesUsed);
   swap(a.validTxnList, b.validTxnList);
+  swap(a.materializationTime, b.materializationTime);
   swap(a.__isset, b.__isset);
 }
 
@@ -21373,6 +21392,7 @@ CreationMetadata::CreationMetadata(const CreationMetadata& other837) {
   tblName = other837.tblName;
   tablesUsed = other837.tablesUsed;
   validTxnList = other837.validTxnList;
+  materializationTime = other837.materializationTime;
   __isset = other837.__isset;
 }
 CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other838) {
@@ -21381,6 +21401,7 @@ CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other838)
   tblName = other838.tblName;
   tablesUsed = other838.tablesUsed;
   validTxnList = other838.validTxnList;
+  materializationTime = other838.materializationTime;
   __isset = other838.__isset;
   return *this;
 }
@@ -21392,6 +21413,7 @@ void CreationMetadata::printTo(std::ostream& out) const {
   out << ", " << "tblName=" << to_string(tblName);
   out << ", " << "tablesUsed=" << to_string(tablesUsed);
   out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << "<null>"));
+  out << ", " << "materializationTime="; (__isset.materializationTime ? (out << to_string(materializationTime)) : (out << "<null>"));
   out << ")";
 }
 
@@ -25434,23 +25456,8 @@ Materialization::~Materialization() throw() {
 }
 
 
-void Materialization::__set_tablesUsed(const std::set<std::string> & val) {
-  this->tablesUsed = val;
-}
-
-void Materialization::__set_validTxnList(const std::string& val) {
-  this->validTxnList = val;
-__isset.validTxnList = true;
-}
-
-void Materialization::__set_invalidationTime(const int64_t val) {
-  this->invalidationTime = val;
-__isset.invalidationTime = true;
-}
-
 void Materialization::__set_sourceTablesUpdateDeleteModified(const bool val) {
   this->sourceTablesUpdateDeleteModified = val;
-__isset.sourceTablesUpdateDeleteModified = true;
 }
 
 uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
@@ -25465,7 +25472,7 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   using ::apache::thrift::protocol::TProtocolException;
 
-  bool isset_tablesUsed = false;
+  bool isset_sourceTablesUpdateDeleteModified = false;
 
   while (true)
   {
@@ -25476,46 +25483,9 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_SET) {
-          {
-            this->tablesUsed.clear();
-            uint32_t _size1012;
-            ::apache::thrift::protocol::TType _etype1015;
-            xfer += iprot->readSetBegin(_etype1015, _size1012);
-            uint32_t _i1016;
-            for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
-            {
-              std::string _elem1017;
-              xfer += iprot->readString(_elem1017);
-              this->tablesUsed.insert(_elem1017);
-            }
-            xfer += iprot->readSetEnd();
-          }
-          isset_tablesUsed = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->validTxnList);
-          this->__isset.validTxnList = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_I64) {
-          xfer += iprot->readI64(this->invalidationTime);
-          this->__isset.invalidationTime = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 4:
         if (ftype == ::apache::thrift::protocol::T_BOOL) {
           xfer += iprot->readBool(this->sourceTablesUpdateDeleteModified);
-          this->__isset.sourceTablesUpdateDeleteModified = true;
+          isset_sourceTablesUpdateDeleteModified = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -25529,7 +25499,7 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   xfer += iprot->readStructEnd();
 
-  if (!isset_tablesUsed)
+  if (!isset_sourceTablesUpdateDeleteModified)
     throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
@@ -25539,33 +25509,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
   xfer += oprot->writeStructBegin("Materialization");
 
-  xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1);
-  {
-    xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size()));
-    std::set<std::string> ::const_iterator _iter1018;
-    for (_iter1018 = this->tablesUsed.begin(); _iter1018 != this->tablesUsed.end(); ++_iter1018)
-    {
-      xfer += oprot->writeString((*_iter1018));
-    }
-    xfer += oprot->writeSetEnd();
-  }
+  xfer += oprot->writeFieldBegin("sourceTablesUpdateDeleteModified", ::apache::thrift::protocol::T_BOOL, 1);
+  xfer += oprot->writeBool(this->sourceTablesUpdateDeleteModified);
   xfer += oprot->writeFieldEnd();
 
-  if (this->__isset.validTxnList) {
-    xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 2);
-    xfer += oprot->writeString(this->validTxnList);
-    xfer += oprot->writeFieldEnd();
-  }
-  if (this->__isset.invalidationTime) {
-    xfer += oprot->writeFieldBegin("invalidationTime", ::apache::thrift::protocol::T_I64, 3);
-    xfer += oprot->writeI64(this->invalidationTime);
-    xfer += oprot->writeFieldEnd();
-  }
-  if (this->__isset.sourceTablesUpdateDeleteModified) {
-    xfer += oprot->writeFieldBegin("sourceTablesUpdateDeleteModified", ::apache::thrift::protocol::T_BOOL, 4);
-    xfer += oprot->writeBool(this->sourceTablesUpdateDeleteModified);
-    xfer += oprot->writeFieldEnd();
-  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -25573,35 +25520,20 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co
 
 void swap(Materialization &a, Materialization &b) {
   using ::std::swap;
-  swap(a.tablesUsed, b.tablesUsed);
-  swap(a.validTxnList, b.validTxnList);
-  swap(a.invalidationTime, b.invalidationTime);
   swap(a.sourceTablesUpdateDeleteModified, b.sourceTablesUpdateDeleteModified);
-  swap(a.__isset, b.__isset);
 }
 
-Materialization::Materialization(const Materialization& other1019) {
-  tablesUsed = other1019.tablesUsed;
-  validTxnList = other1019.validTxnList;
-  invalidationTime = other1019.invalidationTime;
-  sourceTablesUpdateDeleteModified = other1019.sourceTablesUpdateDeleteModified;
-  __isset = other1019.__isset;
+Materialization::Materialization(const Materialization& other1012) {
+  sourceTablesUpdateDeleteModified = other1012.sourceTablesUpdateDeleteModified;
 }
-Materialization& Materialization::operator=(const Materialization& other1020) {
-  tablesUsed = other1020.tablesUsed;
-  validTxnList = other1020.validTxnList;
-  invalidationTime = other1020.invalidationTime;
-  sourceTablesUpdateDeleteModified = other1020.sourceTablesUpdateDeleteModified;
-  __isset = other1020.__isset;
+Materialization& Materialization::operator=(const Materialization& other1013) {
+  sourceTablesUpdateDeleteModified = other1013.sourceTablesUpdateDeleteModified;
   return *this;
 }
 void Materialization::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
   out << "Materialization(";
-  out << "tablesUsed=" << to_string(tablesUsed);
-  out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << "<null>"));
-  out << ", " << "invalidationTime="; (__isset.invalidationTime ? (out << to_string(invalidationTime)) : (out << "<null>"));
-  out << ", " << "sourceTablesUpdateDeleteModified="; (__isset.sourceTablesUpdateDeleteModified ? (out << to_string(sourceTablesUpdateDeleteModified)) : (out << "<null>"));
+  out << "sourceTablesUpdateDeleteModified=" << to_string(sourceTablesUpdateDeleteModified);
   out << ")";
 }
 
@@ -25661,9 +25593,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1021;
-          xfer += iprot->readI32(ecast1021);
-          this->status = (WMResourcePlanStatus::type)ecast1021;
+          int32_t ecast1014;
+          xfer += iprot->readI32(ecast1014);
+          this->status = (WMResourcePlanStatus::type)ecast1014;
           this->__isset.status = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -25737,19 +25669,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMResourcePlan::WMResourcePlan(const WMResourcePlan& other1022) {
-  name = other1022.name;
-  status = other1022.status;
-  queryParallelism = other1022.queryParallelism;
-  defaultPoolPath = other1022.defaultPoolPath;
-  __isset = other1022.__isset;
+WMResourcePlan::WMResourcePlan(const WMResourcePlan& other1015) {
+  name = other1015.name;
+  status = other1015.status;
+  queryParallelism = other1015.queryParallelism;
+  defaultPoolPath = other1015.defaultPoolPath;
+  __isset = other1015.__isset;
 }
-WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other1023) {
-  name = other1023.name;
-  status = other1023.status;
-  queryParallelism = other1023.queryParallelism;
-  defaultPoolPath = other1023.defaultPoolPath;
-  __isset = other1023.__isset;
+WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other1016) {
+  name = other1016.name;
+  status = other1016.status;
+  queryParallelism = other1016.queryParallelism;
+  defaultPoolPath = other1016.defaultPoolPath;
+  __isset = other1016.__isset;
   return *this;
 }
 void WMResourcePlan::printTo(std::ostream& out) const {
@@ -25828,9 +25760,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1024;
-          xfer += iprot->readI32(ecast1024);
-          this->status = (WMResourcePlanStatus::type)ecast1024;
+          int32_t ecast1017;
+          xfer += iprot->readI32(ecast1017);
+          this->status = (WMResourcePlanStatus::type)ecast1017;
           this->__isset.status = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -25931,23 +25863,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other1025) {
-  name = other1025.name;
-  status = other1025.status;
-  queryParallelism = other1025.queryParallelism;
-  isSetQueryParallelism = other1025.isSetQueryParallelism;
-  defaultPoolPath = other1025.defaultPoolPath;
-  isSetDefaultPoolPath = other1025.isSetDefaultPoolPath;
-  __isset = other1025.__isset;
-}
-WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other1026) {
-  name = other1026.name;
-  status = other1026.status;
-  queryParallelism = other1026.queryParallelism;
-  isSetQueryParallelism = other1026.isSetQueryParallelism;
-  defaultPoolPath = other1026.defaultPoolPath;
-  isSetDefaultPoolPath = other1026.isSetDefaultPoolPath;
-  __isset = other1026.__isset;
+WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other1018) {
+  name = other1018.name;
+  status = other1018.status;
+  queryParallelism = other1018.queryParallelism;
+  isSetQueryParallelism = other1018.isSetQueryParallelism;
+  defaultPoolPath = other1018.defaultPoolPath;
+  isSetDefaultPoolPath = other1018.isSetDefaultPoolPath;
+  __isset = other1018.__isset;
+}
+WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other1019) {
+  name = other1019.name;
+  status = other1019.status;
+  queryParallelism = other1019.queryParallelism;
+  isSetQueryParallelism = other1019.isSetQueryParallelism;
+  defaultPoolPath = other1019.defaultPoolPath;
+  isSetDefaultPoolPath = other1019.isSetDefaultPoolPath;
+  __isset = other1019.__isset;
   return *this;
 }
 void WMNullableResourcePlan::printTo(std::ostream& out) const {
@@ -26112,21 +26044,21 @@ void swap(WMPool &a, WMPool &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMPool::WMPool(const WMPool& other1027) {
-  resourcePlanName = other1027.resourcePlanName;
-  poolPath = other1027.poolPath;
-  allocFraction = other1027.allocFraction;
-  queryParallelism = other1027.queryParallelism;
-  schedulingPolicy = other1027.schedulingPolicy;
-  __isset = other1027.__isset;
+WMPool::WMPool(const WMPool& other1020) {
+  resourcePlanName = other1020.resourcePlanName;
+  poolPath = other1020.poolPath;
+  allocFraction = other1020.allocFraction;
+  queryParallelism = other1020.queryParallelism;
+  schedulingPolicy = other1020.schedulingPolicy;
+  __isset = other1020.__isset;
 }
-WMPool& WMPool::operator=(const WMPool& other1028) {
-  resourcePlanName = other1028.resourcePlanName;
-  poolPath = other1028.poolPath;
-  allocFraction = other1028.allocFraction;
-  queryParallelism = other1028.queryParallelism;
-  schedulingPolicy = other1028.schedulingPolicy;
-  __isset = other1028.__isset;
+WMPool& WMPool::operator=(const WMPool& other1021) {
+  resourcePlanName = other1021.resourcePlanName;
+  poolPath = other1021.poolPath;
+  allocFraction = other1021.allocFraction;
+  queryParallelism = other1021.queryParallelism;
+  schedulingPolicy = other1021.schedulingPolicy;
+  __isset = other1021.__isset;
   return *this;
 }
 void WMPool::printTo(std::ostream& out) const {
@@ -26309,23 +26241,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMNullablePool::WMNullablePool(const WMNullablePool& other1029) {
-  resourcePlanName = other1029.resourcePlanName;
-  poolPath = other1029.poolPath;
-  allocFraction = other1029.allocFraction;
-  queryParallelism = other1029.queryParallelism;
-  schedulingPolicy = other1029.schedulingPolicy;
-  isSetSchedulingPolicy = other1029.isSetSchedulingPolicy;
-  __isset = other1029.__isset;
-}
-WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other1030) {
-  resourcePlanName = other1030.resourcePlanName;
-  poolPath = other1030.poolPath;
-  allocFraction = other1030.allocFraction;
-  queryParallelism = other1030.queryParallelism;
-  schedulingPolicy = other1030.schedulingPolicy;
-  isSetSchedulingPolicy = other1030.isSetSchedulingPolicy;
-  __isset = other1030.__isset;
+WMNullablePool::WMNullablePool(const WMNullablePool& other1022) {
+  resourcePlanName = other1022.resourcePlanName;
+  poolPath = other1022.poolPath;
+  allocFraction = other1022.allocFraction;
+  queryParallelism = other1022.queryParallelism;
+  schedulingPolicy = other1022.schedulingPolicy;
+  isSetSchedulingPolicy = other1022.isSetSchedulingPolicy;
+  __isset = other1022.__isset;
+}
+WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other1023) {
+  resourcePlanName = other1023.resourcePlanName;
+  poolPath = other1023.poolPath;
+  allocFraction = other1023.allocFraction;
+  queryParallelism = other1023.queryParallelism;
+  schedulingPolicy = other1023.schedulingPolicy;
+  isSetSchedulingPolicy = other1023.isSetSchedulingPolicy;
+  __isset = other1023.__isset;
   return *this;
 }
 void WMNullablePool::printTo(std::ostream& out) const {
@@ -26490,21 +26422,21 @@ void swap(WMTrigger &a, WMTrigger &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMTrigger::WMTrigger(const WMTrigger& other1031) {
-  resourcePlanName = other1031.resourcePlanName;
-  triggerName = other1031.triggerName;
-  triggerExpression = other1031.triggerExpression;
-  actionExpression = other1031.actionExpression;
-  isInUnmanaged = other1031.isInUnmanaged;
-  __isset = other1031.__isset;
-}
-WMTrigger& WMTrigger::operator=(const WMTrigger& other1032) {
-  resourcePlanName = other1032.resourcePlanName;
-  triggerName = other1032.triggerName;
-  triggerExpression = other1032.triggerExpression;
-  actionExpression = other1032.actionExpression;
-  isInUnmanaged = other1032.isInUnmanaged;
-  __isset = other1032.__isset;
+WMTrigger::WMTrigger(const WMTrigger& other1024) {
+  resourcePlanName = other1024.resourcePlanName;
+  triggerName = other1024.triggerName;
+  triggerExpression = other1024.triggerExpression;
+  actionExpression = other1024.actionExpression;
+  isInUnmanaged = other1024.isInUnmanaged;
+  __isset = other1024.__isset;
+}
+WMTrigger& WMTrigger::operator=(const WMTrigger& other1025) {
+  resourcePlanName = other1025.resourcePlanName;
+  triggerName = other1025.triggerName;
+  triggerExpression = other1025.triggerExpression;
+  actionExpression = other1025.actionExpression;
+  isInUnmanaged = other1025.isInUnmanaged;
+  __isset = other1025.__isset;
   return *this;
 }
 void WMTrigger::printTo(std::ostream& out) const {
@@ -26669,21 +26601,21 @@ void swap(WMMapping &a, WMMapping &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMMapping::WMMapping(const WMMapping& other1033) {
-  resourcePlanName = other1033.resourcePlanName;
-  entityType = other1033.entityType;
-  entityName = other1033.entityName;
-  poolPath = other1033.poolPath;
-  ordering = other1033.ordering;
-  __isset = other1033.__isset;
-}
-WMMapping& WMMapping::operator=(const WMMapping& other1034) {
-  resourcePlanName = other1034.resourcePlanName;
-  entityType = other1034.entityType;
-  entityName = other1034.entityName;
-  poolPath = other1034.poolPath;
-  ordering = other1034.ordering;
-  __isset = other1034.__isset;
+WMMapping::WMMapping(const WMMapping& other1026) {
+  resourcePlanName = other1026.resourcePlanName;
+  entityType = other1026.entityType;
+  entityName = other1026.entityName;
+  poolPath = other1026.poolPath;
+  ordering = other1026.ordering;
+  __isset = other1026.__isset;
+}
+WMMapping& WMMapping::operator=(const WMMapping& other1027) {
+  resourcePlanName = other1027.resourcePlanName;
+  entityType = other1027.entityType;
+  entityName = other1027.entityName;
+  poolPath = other1027.poolPath;
+  ordering = other1027.ordering;
+  __isset = other1027.__isset;
   return *this;
 }
 void WMMapping::printTo(std::ostream& out) const {
@@ -26789,13 +26721,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) {
   swap(a.trigger, b.trigger);
 }
 
-WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other1035) {
-  pool = other1035.pool;
-  trigger = other1035.trigger;
+WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other1028) {
+  pool = other1028.pool;
+  trigger = other1028.trigger;
 }
-WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other1036) {
-  pool = other1036.pool;
-  trigger = other1036.trigger;
+WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other1029) {
+  pool = other1029.pool;
+  trigger = other1029.trigger;
   return *this;
 }
 void WMPoolTrigger::printTo(std::ostream& out) const {
@@ -26869,14 +26801,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->pools.clear();
-            uint32_t _size1037;
-            ::apache::thrift::protocol::TType _etype1040;
-            xfer += iprot->readListBegin(_etype1040, _size1037);
-            this->pools.resize(_size1037);
-            uint32_t _i1041;
-            for (_i1041 = 0; _i1041 < _size1037; ++_i1041)
+            uint32_t _size1030;
+            ::apache::thrift::protocol::TType _etype1033;
+            xfer += iprot->readListBegin(_etype1033, _size1030);
+            this->pools.resize(_size1030);
+            uint32_t _i1034;
+            for (_i1034 = 0; _i1034 < _size1030; ++_i1034)
             {
-              xfer += this->pools[_i1041].read(iprot);
+              xfer += this->pools[_i1034].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -26889,14 +26821,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->mappings.clear();
-            uint32_t _size1042;
-            ::apache::thrift::protocol::TType _etype1045;
-            xfer += iprot->readListBegin(_etype1045, _size1042);
-            this->mappings.resize(_size1042);
-            uint32_t _i1046;
-            for (_i1046 = 0; _i1046 < _size1042; ++_i1046)
+            uint32_t _size1035;
+            ::apache::thrift::protocol::TType _etype1038;
+            xfer += iprot->readListBegin(_etype1038, _size1035);
+            this->mappings.resize(_size1035);
+            uint32_t _i1039;
+            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
             {
-              xfer += this->mappings[_i1046].read(iprot);
+              xfer += this->mappings[_i1039].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -26909,14 +26841,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->triggers.clear();
-            uint32_t _size1047;
-            ::apache::thrift::protocol::TType _etype1050;
-            xfer += iprot->readListBegin(_etype1050, _size1047);
-            this->triggers.resize(_size1047);
-            uint32_t _i1051;
-            for (_i1051 = 0; _i1051 < _size1047; ++_i1051)
+            uint32_t _size1040;
+            ::apache::thrift::protocol::TType _etype1043;
+            xfer += iprot->readListBegin(_etype1043, _size1040);
+            this->triggers.resize(_size1040);
+            uint32_t _i1044;
+            for (_i1044 = 0; _i1044 < _size1040; ++_i1044)
             {
-              xfer += this->triggers[_i1051].read(iprot);
+              xfer += this->triggers[_i1044].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -26929,14 +26861,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot)
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->poolTriggers.clear();
-            uint32_t _size1052;
-            ::apache::thrift::protocol::TType _etype1055;
-            xfer += iprot->readListBegin(_etype1055, _size1052);
-            this->poolTriggers.resize(_size1052);
-            uint32_t _i1056;
-            for (_i1056 = 0; _i1056 < _size1052; ++_i1056)
+            uint32_t _size1045;
+            ::apache::thrift::protocol::TType _etype1048;
+            xfer += iprot->readListBegin(_etype1048, _size1045);
+            this->poolTriggers.resize(_size1045);
+            uint32_t _i1049;
+            for (_i1049 = 0; _i1049 < _size1045; ++_i1049)
             {
-              xfer += this->poolTriggers[_i1056].read(iprot);
+              xfer += this->poolTriggers[_i1049].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -26973,10 +26905,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
   xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->pools.size()));
-    std::vector<WMPool> ::const_iterator _iter1057;
-    for (_iter1057 = this->pools.begin(); _iter1057 != this->pools.end(); ++_iter1057)
+    std::vector<WMPool> ::const_iterator _iter1050;
+    for (_iter1050 = this->pools.begin(); _iter1050 != this->pools.end(); ++_iter1050)
     {
-      xfer += (*_iter1057).write(oprot);
+      xfer += (*_iter1050).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -26986,10 +26918,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->mappings.size()));
-      std::vector<WMMapping> ::const_iterator _iter1058;
-      for (_iter1058 = this->mappings.begin(); _iter1058 != this->mappings.end(); ++_iter1058)
+      std::vector<WMMapping> ::const_iterator _iter1051;
+      for (_iter1051 = this->mappings.begin(); _iter1051 != this->mappings.end(); ++_iter1051)
       {
-        xfer += (*_iter1058).write(oprot);
+        xfer += (*_iter1051).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -26999,10 +26931,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size()));
-      std::vector<WMTrigger> ::const_iterator _iter1059;
-      for (_iter1059 = this->triggers.begin(); _iter1059 != this->triggers.end(); ++_iter1059)
+      std::vector<WMTrigger> ::const_iterator _iter1052;
+      for (_iter1052 = this->triggers.begin(); _iter1052 != this->triggers.end(); ++_iter1052)
       {
-        xfer += (*_iter1059).write(oprot);
+        xfer += (*_iter1052).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -27012,10 +26944,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot)
     xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->poolTriggers.size()));
-      std::vector<WMPoolTrigger> ::const_iterator _iter1060;
-      for (_iter1060 = this->poolTriggers.begin(); _iter1060 != this->poolTriggers.end(); ++_iter1060)
+      std::vector<WMPoolTrigger> ::const_iterator _iter1053;
+      for (_iter1053 = this->poolTriggers.begin(); _iter1053 != this->poolTriggers.end(); ++_iter1053)
       {
-        xfer += (*_iter1060).write(oprot);
+        xfer += (*_iter1053).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -27036,21 +26968,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other1061) {
-  plan = other1061.plan;
-  pools = other1061.pools;
-  mappings = other1061.mappings;
-  triggers = other1061.triggers;
-  poolTriggers = other1061.poolTriggers;
-  __isset = other1061.__isset;
-}
-WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other1062) {
-  plan = other1062.plan;
-  pools = other1062.pools;
-  mappings = other1062.mappings;
-  triggers = other1062.triggers;
-  poolTriggers = other1062.poolTriggers;
-  __isset = other1062.__isset;
+WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other1054) {
+  plan = other1054.plan;
+  pools = other1054.pools;
+  mappings = other1054.mappings;
+  triggers = other1054.triggers;
+  poolTriggers = other1054.poolTriggers;
+  __isset = other1054.__isset;
+}
+WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other1055) {
+  plan = other1055.plan;
+  pools = other1055.pools;
+  mappings = other1055.mappings;
+  triggers = other1055.triggers;
+  poolTriggers = other1055.poolTriggers;
+  __isset = other1055.__isset;
   return *this;
 }
 void WMFullResourcePlan::printTo(std::ostream& out) const {
@@ -27155,15 +27087,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other1063) {
-  resourcePlan = other1063.resourcePlan;
-  copyFrom = other1063.copyFrom;
-  __isset = other1063.__isset;
+WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other1056) {
+  resourcePlan = other1056.resourcePlan;
+  copyFrom = other1056.copyFrom;
+  __isset = other1056.__isset;
 }
-WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other1064) {
-  resourcePlan = other1064.resourcePlan;
-  copyFrom = other1064.copyFrom;
-  __isset = other1064.__isset;
+WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other1057) {
+  resourcePlan = other1057.resourcePlan;
+  copyFrom = other1057.copyFrom;
+  __isset = other1057.__isset;
   return *this;
 }
 void WMCreateResourcePlanRequest::printTo(std::ostream& out) const {
@@ -27223,11 +27155,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) {
   (void) b;
 }
 
-WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1065) {
-  (void) other1065;
+WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other1058) {
+  (void) other1058;
 }
-WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1066) {
-  (void) other1066;
+WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other1059) {
+  (void) other1059;
   return *this;
 }
 void WMCreateResourcePlanResponse::printTo(std::ostream& out) const {
@@ -27285,11 +27217,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b)
   (void) b;
 }
 
-WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1067) {
-  (void) other1067;
+WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other1060) {
+  (void) other1060;
 }
-WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1068) {
-  (void) other1068;
+WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other1061) {
+  (void) other1061;
   return *this;
 }
 void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const {
@@ -27370,13 +27302,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b
   swap(a.__isset, b.__isset);
 }
 
-WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1069) {
-  resourcePlan = other1069.resourcePlan;
-  __isset = other1069.__isset;
+WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other1062) {
+  resourcePlan = other1062.resourcePlan;
+  __isset = other1062.__isset;
 }
-WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1070) {
-  resourcePlan = other1070.resourcePlan;
-  __isset = other1070.__isset;
+WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other1063) {
+  resourcePlan = other1063.resourcePlan;
+  __isset = other1063.__isset;
   return *this;
 }
 void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const {
@@ -27458,13 +27390,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1071) {
-  resourcePlanName = other1071.resourcePlanName;
-  __isset = other1071.__isset;
+WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other1064) {
+  resourcePlanName = other1064.resourcePlanName;
+  __isset = other1064.__isset;
 }
-WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1072) {
-  resourcePlanName = other1072.resourcePlanName;
-  __isset = other1072.__isset;
+WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other1065) {
+  resourcePlanName = other1065.resourcePlanName;
+  __isset = other1065.__isset;
   return *this;
 }
 void WMGetResourcePlanRequest::printTo(std::ostream& out) const {
@@ -27546,13 +27478,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other1073) {
-  resourcePlan = other1073.resourcePlan;
-  __isset = other1073.__isset;
+WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other1066) {
+  resourcePlan = other1066.resourcePlan;
+  __isset = other1066.__isset;
 }
-WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1074) {
-  resourcePlan = other1074.resourcePlan;
-  __isset = other1074.__isset;
+WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other1067) {
+  resourcePlan = other1067.resourcePlan;
+  __isset = other1067.__isset;
   return *this;
 }
 void WMGetResourcePlanResponse::printTo(std::ostream& out) const {
@@ -27611,11 +27543,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) {
   (void) b;
 }
 
-WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1075) {
-  (void) other1075;
+WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other1068) {
+  (void) other1068;
 }
-WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1076) {
-  (void) other1076;
+WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other1069) {
+  (void) other1069;
   return *this;
 }
 void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const {
@@ -27659,14 +27591,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->resourcePlans.clear();
-            uint32_t _size1077;
-            ::apache::thrift::protocol::TType _etype1080;
-            xfer += iprot->readListBegin(_etype1080, _size1077);
-            this->resourcePlans.resize(_size1077);
-            uint32_t _i1081;
-            for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
+            uint32_t _size1070;
+            ::apache::thrift::protocol::TType _etype1073;
+            xfer += iprot->readListBegin(_etype1073, _size1070);
+            this->resourcePlans.resize(_size1070);
+            uint32_t _i1074;
+            for (_i1074 = 0; _i1074 < _size1070; ++_i1074)
             {
-              xfer += this->resourcePlans[_i1081].read(iprot);
+              xfer += this->resourcePlans[_i1074].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -27696,10 +27628,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc
     xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourcePlans.size()));
-      std::vector<WMResourcePlan> ::const_iterator _iter1082;
-      for (_iter1082 = this->resourcePlans.begin(); _iter1082 != this->resourcePlans.end(); ++_iter1082)
+      std::vector<WMResourcePlan> ::const_iterator _iter1075;
+      for (_iter1075 = this->resourcePlans.begin(); _iter1075 != this->resourcePlans.end(); ++_iter1075)
       {
-        xfer += (*_iter1082).write(oprot);
+        xfer += (*_iter1075).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -27716,13 +27648,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1083) {
-  resourcePlans = other1083.resourcePlans;
-  __isset = other1083.__isset;
+WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1076) {
+  resourcePlans = other1076.resourcePlans;
+  __isset = other1076.__isset;
 }
-WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1084) {
-  resourcePlans = other1084.resourcePlans;
-  __isset = other1084.__isset;
+WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1077) {
+  resourcePlans = other1077.resourcePlans;
+  __isset = other1077.__isset;
   return *this;
 }
 void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const {
@@ -27880,21 +27812,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1085) {
-  resourcePlanName = other1085.resourcePlanName;
-  resourcePlan = other1085.resourcePlan;
-  isEnableAndActivate = other1085.isEnableAndActivate;
-  isForceDeactivate = other1085.isForceDeactivate;
-  isReplace = other1085.isReplace;
-  __isset = other1085.__isset;
-}
-WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1086) {
-  resourcePlanName = other1086.resourcePlanName;
-  resourcePlan = other1086.resourcePlan;
-  isEnableAndActivate = other1086.isEnableAndActivate;
-  isForceDeactivate = other1086.isForceDeactivate;
-  isReplace = other1086.isReplace;
-  __isset = other1086.__isset;
+WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1078) {
+  resourcePlanName = other1078.resourcePlanName;
+  resourcePlan = other1078.resourcePlan;
+  isEnableAndActivate = other1078.isEnableAndActivate;
+  isForceDeactivate = other1078.isForceDeactivate;
+  isReplace = other1078.isReplace;
+  __isset = other1078.__isset;
+}
+WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1079) {
+  resourcePlanName = other1079.resourcePlanName;
+  resourcePlan = other1079.resourcePlan;
+  isEnableAndActivate = other1079.isEnableAndActivate;
+  isForceDeactivate = other1079.isForceDeactivate;
+  isReplace = other1079.isReplace;
+  __isset = other1079.__isset;
   return *this;
 }
 void WMAlterResourcePlanRequest::printTo(std::ostream& out) const {
@@ -27980,13 +27912,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1087) {
-  fullResourcePlan = other1087.fullResourcePlan;
-  __isset = other1087.__isset;
+WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1080) {
+  fullResourcePlan = other1080.fullResourcePlan;
+  __isset = other1080.__isset;
 }
-WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1088) {
-  fullResourcePlan = other1088.fullResourcePlan;
-  __isset = other1088.__isset;
+WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1081) {
+  fullResourcePlan = other1081.fullResourcePlan;
+  __isset = other1081.__isset;
   return *this;
 }
 void WMAlterResourcePlanResponse::printTo(std::ostream& out) const {
@@ -28068,13 +28000,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1089) {
-  resourcePlanName = other1089.resourcePlanName;
-  __isset = other1089.__isset;
+WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1082) {
+  resourcePlanName = other1082.resourcePlanName;
+  __isset = other1082.__isset;
 }
-WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1090) {
-  resourcePlanName = other1090.resourcePlanName;
-  __isset = other1090.__isset;
+WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1083) {
+  resourcePlanName = other1083.resourcePlanName;
+  __isset = other1083.__isset;
   return *this;
 }
 void WMValidateResourcePlanRequest::printTo(std::ostream& out) const {
@@ -28124,14 +28056,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->errors.clear();
-            uint32_t _size1091;
-            ::apache::thrift::protocol::TType _etype1094;
-            xfer += iprot->readListBegin(_etype1094, _size1091);
-            this->errors.resize(_size1091);
-            uint32_t _i1095;
-            for (_i1095 = 0; _i1095 < _size1091; ++_i1095)
+            uint32_t _size1084;
+            ::apache::thrift::protocol::TType _etype1087;
+            xfer += iprot->readListBegin(_etype1087, _size1084);
+            this->errors.resize(_size1084);
+            uint32_t _i1088;
+            for (_i1088 = 0; _i1088 < _size1084; ++_i1088)
             {
-              xfer += iprot->readString(this->errors[_i1095]);
+              xfer += iprot->readString(this->errors[_i1088]);
             }
             xfer += iprot->readListEnd();
           }
@@ -28144,14 +28076,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->warnings.clear();
-            uint32_t _size1096;
-            ::apache::thrift::protocol::TType _etype1099;
-            xfer += iprot->readListBegin(_etype1099, _size1096);
-            this->warnings.resize(_size1096);
-            uint32_t _i1100;
-            for (_i1100 = 0; _i1100 < _size1096; ++_i1100)
+            uint32_t _size1089;
+            ::apache::thrift::protocol::TType _etype1092;
+            xfer += iprot->readListBegin(_etype1092, _size1089);
+            this->warnings.resize(_size1089);
+            uint32_t _i1093;
+            for (_i1093 = 0; _i1093 < _size1089; ++_i1093)
             {
-              xfer += iprot->readString(this->warnings[_i1100]);
+              xfer += iprot->readString(this->warnings[_i1093]);
             }
             xfer += iprot->readListEnd();
           }
@@ -28181,10 +28113,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt
     xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->errors.size()));
-      std::vector<std::string> ::const_iterator _iter1101;
-      for (_iter1101 = this->errors.begin(); _iter1101 != this->errors.end(); ++_iter1101)
+      std::vector<std::string> ::const_iterator _iter1094;
+      for (_iter1094 = this->errors.begin(); _iter1094 != this->errors.end(); ++_iter1094)
       {
-        xfer += oprot->writeString((*_iter1101));
+        xfer += oprot->writeString((*_iter1094));
       }
       xfer += oprot->writeListEnd();
     }
@@ -28194,10 +28126,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt
     xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->warnings.size()));
-      std::vector<std::string> ::const_iterator _iter1102;
-      for (_iter1102 = this->warnings.begin(); _iter1102 != this->warnings.end(); ++_iter1102)
+      std::vector<std::string> ::const_iterator _iter1095;
+      for (_iter1095 = this->warnings.begin(); _iter1095 != this->warnings.end(); ++_iter1095)
       {
-        xfer += oprot->writeString((*_iter1102));
+        xfer += oprot->writeString((*_iter1095));
       }
       xfer += oprot->writeListEnd();
     }
@@ -28215,15 +28147,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b)
   swap(a.__isset, b.__isset);
 }
 
-WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1103) {
-  errors = other1103.errors;
-  warnings = other1103.warnings;
-  __isset = other1103.__isset;
+WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1096) {
+  errors = other1096.errors;
+  warnings = other1096.warnings;
+  __isset = other1096.__isset;
 }
-WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1104) {
-  errors = other1104.errors;
-  warnings = other1104.warnings;
-  __isset = other1104.__isset;
+WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1097) {
+  errors = other1097.errors;
+  warnings = other1097.warnings;
+  __isset = other1097.__isset;
   return *this;
 }
 void WMValidateResourcePlanResponse::printTo(std::ostream& out) const {
@@ -28306,13 +28238,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1105) {
-  resourcePlanName = other1105.resourcePlanName;
-  __isset = other1105.__isset;
+WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1098) {
+  resourcePlanName = other1098.resourcePlanName;
+  __isset = other1098.__isset;
 }
-WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1106) {
-  resourcePlanName = other1106.resourcePlanName;
-  __isset = other1106.__isset;
+WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1099) {
+  resourcePlanName = other1099.resourcePlanName;
+  __isset = other1099.__isset;
   return *this;
 }
 void WMDropResourcePlanRequest::printTo(std::ostream& out) const {
@@ -28371,11 +28303,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) {
   (void) b;
 }
 
-WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1107) {
-  (void) other1107;
+WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1100) {
+  (void) other1100;
 }
-WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1108) {
-  (void) other1108;
+WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1101) {
+  (void) other1101;
   return *this;
 }
 void WMDropResourcePlanResponse::printTo(std::ostream& out) const {
@@ -28456,13 +28388,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1109) {
-  trigger = other1109.trigger;
-  __isset = other1109.__isset;
+WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1102) {
+  trigger = other1102.trigger;
+  __isset = other1102.__isset;
 }
-WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1110) {
-  trigger = other1110.trigger;
-  __isset = other1110.__isset;
+WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1103) {
+  trigger = other1103.trigger;
+  __isset = other1103.__isset;
   return *this;
 }
 void WMCreateTriggerRequest::printTo(std::ostream& out) const {
@@ -28521,11 +28453,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) {
   (void) b;
 }
 
-WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1111) {
-  (void) other1111;
+WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1104) {
+  (void) other1104;
 }
-WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1112) {
-  (void) other1112;
+WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1105) {
+  (void) other1105;
   return *this;
 }
 void WMCreateTriggerResponse::printTo(std::ostream& out) const {
@@ -28606,13 +28538,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1113) {
-  trigger = other1113.trigger;
-  __isset = other1113.__isset;
+WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1106) {
+  trigger = other1106.trigger;
+  __isset = other1106.__isset;
 }
-WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1114) {
-  trigger = other1114.trigger;
-  __isset = other1114.__isset;
+WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1107) {
+  trigger = other1107.trigger;
+  __isset = other1107.__isset;
   return *this;
 }
 void WMAlterTriggerRequest::printTo(std::ostream& out) const {
@@ -28671,11 +28603,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) {
   (void) b;
 }
 
-WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1115) {
-  (void) other1115;
+WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1108) {
+  (void) other1108;
 }
-WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1116) {
-  (void) other1116;
+WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1109) {
+  (void) other1109;
   return *this;
 }
 void WMAlterTriggerResponse::printTo(std::ostream& out) const {
@@ -28775,15 +28707,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1117) {
-  resourcePlanName = other1117.resourcePlanName;
-  triggerName = other1117.triggerName;
-  __isset = other1117.__isset;
+WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1110) {
+  resourcePlanName = other1110.resourcePlanName;
+  triggerName = other1110.triggerName;
+  __isset = other1110.__isset;
 }
-WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1118) {
-  resourcePlanName = other1118.resourcePlanName;
-  triggerName = other1118.triggerName;
-  __isset = other1118.__isset;
+WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1111) {
+  resourcePlanName = other1111.resourcePlanName;
+  triggerName = other1111.triggerName;
+  __isset = other1111.__isset;
   return *this;
 }
 void WMDropTriggerRequest::printTo(std::ostream& out) const {
@@ -28843,11 +28775,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) {
   (void) b;
 }
 
-WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1119) {
-  (void) other1119;
+WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1112) {
+  (void) other1112;
 }
-WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1120) {
-  (void) other1120;
+WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1113) {
+  (void) other1113;
   return *this;
 }
 void WMDropTriggerResponse::printTo(std::ostream& out) const {
@@ -28928,13 +28860,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ
   swap(a.__isset, b.__isset);
 }
 
-WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1121) {
-  resourcePlanName = other1121.resourcePlanName;
-  __isset = other1121.__isset;
+WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1114) {
+  resourcePlanName = other1114.resourcePlanName;
+  __isset = other1114.__isset;
 }
-WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1122) {
-  resourcePlanName = other1122.resourcePlanName;
-  __isset = other1122.__isset;
+WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1115) {
+  resourcePlanName = other1115.resourcePlanName;
+  __isset = other1115.__isset;
   return *this;
 }
 void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const {
@@ -28979,14 +28911,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->triggers.clear();
-            uint32_t _size1123;
-            ::apache::thrift::protocol::TType _etype1126;
-            xfer += iprot->readListBegin(_etype1126, _size1123);
-            this->triggers.resize(_size1123);
-            uint32_t _i1127;
-            for (_i1127 = 0; _i1127 < _size1123; ++_i1127)
+            uint32_t _size1116;
+            ::apache::thrift::protocol::TType _etype1119;
+            xfer += iprot->readListBegin(_etype1119, _size1116);
+            this->triggers.resize(_size1116);
+            uint32_t _i1120;
+            for (_i1120 = 0; _i1120 < _size1116; ++_i1120)
             {
-              xfer += this->triggers[_i1127].read(iprot);
+              xfer += this->triggers[_i1120].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -29016,10 +28948,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol::
     xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size()));
-      std::vector<WMTrigger> ::const_iterator _iter1128;
-      for (_iter1128 = this->triggers.begin(); _iter1128 != this->triggers.end(); ++_iter1128)
+      std::vector<WMTrigger> ::const_iterator _iter1121;
+      for (_iter1121 = this->triggers.begin(); _iter1121 != this->triggers.end(); ++_iter1121)
       {
-        xfer += (*_iter1128).write(oprot);
+        xfer += (*_iter1121).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -29036,13 +28968,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes
   swap(a.__isset, b.__isset);
 }
 
-WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1129) {
-  triggers = other1129.triggers;
-  __isset = other1129.__isset;
+WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1122) {
+  triggers = other1122.triggers;
+  __isset = other1122.__isset;
 }
-WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1130) {
-  triggers = other1130.triggers;
-  __isset = other1130.__isset;
+WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1123) {
+  triggers = other1123.triggers;
+  __isset = other1123.__isset;
   return *this;
 }
 void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const {
@@ -29124,13 +29056,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1131) {
-  pool = other1131.pool;
-  __isset = other1131.__isset;
+WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1124) {
+  pool = other1124.pool;
+  __isset = other1124.__isset;
 }
-WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1132) {
-  pool = other1132.pool;
-  __isset = other1132.__isset;
+WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1125) {
+  pool = other1125.pool;
+  __isset = other1125.__isset;
   return *this;
 }
 void WMCreatePoolRequest::printTo(std::ostream& out) const {
@@ -29189,11 +29121,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) {
   (void) b;
 }
 
-WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1133) {
-  (void) other1133;
+WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1126) {
+  (void) other1126;
 }
-WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1134) {
-  (void) other1134;
+WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1127) {
+  (void) other1127;
   return *this;
 }
 void WMCreatePoolResponse::printTo(std::ostream& out) const {
@@ -29293,15 +29225,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1135) {
-  pool = other1135.pool;
-  poolPath = other1135.poolPath;
-  __isset = other1135.__isset;
+WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1128) {
+  pool = other1128.pool;
+  poolPath = other1128.poolPath;
+  __isset = other1128.__isset;
 }
-WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1136) {
-  pool = other1136.pool;
-  poolPath = other1136.poolPath;
-  __isset = other1136.__isset;
+WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1129) {
+  pool = other1129.pool;
+  poolPath = other1129.poolPath;
+  __isset = other1129.__isset;
   return *this;
 }
 void WMAlterPoolRequest::printTo(std::ostream& out) const {
@@ -29361,11 +29293,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) {
   (void) b;
 }
 
-WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1137) {
-  (void) other1137;
+WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1130) {
+  (void) other1130;
 }
-WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1138) {
-  (void) other1138;
+WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1131) {
+  (void) other1131;
   return *this;
 }
 void WMAlterPoolResponse::printTo(std::ostream& out) const {
@@ -29465,15 +29397,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1139) {
-  resourcePlanName = other1139.resourcePlanName;
-  poolPath = other1139.poolPath;
-  __isset = other1139.__isset;
+WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1132) {
+  resourcePlanName = other1132.resourcePlanName;
+  poolPath = other1132.poolPath;
+  __isset = other1132.__isset;
 }
-WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1140) {
-  resourcePlanName = other1140.resourcePlanName;
-  poolPath = other1140.poolPath;
-  __isset = other1140.__isset;
+WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1133) {
+  resourcePlanName = other1133.resourcePlanName;
+  poolPath = other1133.poolPath;
+  __isset = other1133.__isset;
   return *this;
 }
 void WMDropPoolRequest::printTo(std::ostream& out) const {
@@ -29533,11 +29465,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) {
   (void) b;
 }
 
-WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1141) {
-  (void) other1141;
+WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1134) {
+  (void) other1134;
 }
-WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1142) {
-  (void) other1142;
+WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1135) {
+  (void) other1135;
   return *this;
 }
 void WMDropPoolResponse::printTo(std::ostream& out) const {
@@ -29637,15 +29569,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b)
   swap(a.__isset, b.__isset);
 }
 
-WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1143) {
-  mapping = other1143.mapping;
-  update = other1143.update;
-  __isset = other1143.__isset;
+WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1136) {
+  mapping = other1136.mapping;
+  update = other1136.update;
+  __isset = other1136.__isset;
 }
-WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1144) {
-  mapping = other1144.mapping;
-  update = other1144.update;
-  __isset = other1144.__isset;
+WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1137) {
+  mapping = other1137.mapping;
+  update = other1137.update;
+  __isset = other1137.__isset;
   return *this;
 }
 void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const {
@@ -29705,11 +29637,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b
   (void) b;
 }
 
-WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1145) {
-  (void) other1145;
+WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1138) {
+  (void) other1138;
 }
-WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1146) {
-  (void) other1146;
+WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1139) {
+  (void) other1139;
   return *this;
 }
 void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const {
@@ -29790,13 +29722,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1147) {
-  mapping = other1147.mapping;
-  __isset = other1147.__isset;
+WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1140) {
+  mapping = other1140.mapping;
+  __isset = other1140.__isset;
 }
-WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1148) {
-  mapping = other1148.mapping;
-  __isset = other1148.__isset;
+WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1141) {
+  mapping = other1141.mapping;
+  __isset = other1141.__isset;
   return *this;
 }
 void WMDropMappingRequest::printTo(std::ostream& out) const {
@@ -29855,11 +29787,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) {
   (void) b;
 }
 
-WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1149) {
-  (void) other1149;
+WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1142) {
+  (void) other1142;
 }
-WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1150) {
-  (void) other1150;
+WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1143) {
+  (void) other1143;
   return *this;
 }
 void WMDropMappingResponse::printTo(std::ostream& out) const {
@@ -29997,19 +29929,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP
   swap(a.__isset, b.__isset);
 }
 
-WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1151) {
-  resourcePlanName = other1151.resourcePlanName;
-  triggerName = other1151.triggerName;
-  poolPath = other1151.poolPath;
-  drop = other1151.drop;
-  __isset = other1151.__isset;
+WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1144) {
+  resourcePlanName = other1144.resourcePlanName;
+  triggerName = other1144.triggerName;
+  poolPath = other1144.poolPath;
+  drop = other1144.drop;
+  __isset = other1144.__isset;
 }
-WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1152) {
-  resourcePlanName = other1152.resourcePlanName;
-  triggerName = other1152.triggerName;
-  poolPath = other1152.poolPath;
-  drop = other1152.drop;
-  __isset = other1152.__isset;
+WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1145) {
+  resourcePlanName = other1145.resourcePlanName;
+  triggerName = other1145.triggerName;
+  poolPath = other1145.poolPath;
+  drop = other1145.drop;
+  __isset = other1145.__isset;
   return *this;
 }
 void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const {
@@ -30071,11 +30003,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo
   (void) b;
 }
 
-WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1153) {
-  (void) other1153;
+WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1146) {
+  (void) other1146;
 }
-WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1154) {
-  (void) other1154;
+WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1147) {
+  (void) other1147;
   return *this;
 }
 void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const {
@@ -30150,9 +30082,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) {
     {
       case 1:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1155;
-          xfer += iprot->readI32(ecast1155);
-          this->schemaType = (SchemaType::type)ecast1155;
+          int32_t ecast1148;
+          xfer += iprot->readI32(ecast1148);
+          this->schemaType = (SchemaType::type)ecast1148;
           this->__isset.schemaType = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -30184,9 +30116,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 5:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1156;
-          xfer += iprot->readI32(ecast1156);
-          this->compatibility = (SchemaCompatibility::type)ecast1156;
+          int32_t ecast1149;
+          xfer += iprot->readI32(ecast1149);
+          this->compatibility = (SchemaCompatibility::type)ecast1149;
           this->__isset.compatibility = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -30194,9 +30126,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 6:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1157;
-          xfer += iprot->readI32(ecast1157);
-          this->validationLevel = (SchemaValidation::type)ecast1157;
+          int32_t ecast1150;
+          xfer += iprot->readI32(ecast1150);
+          this->validationLevel = (SchemaValidation::type)ecast1150;
           this->__isset.validationLevel = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -30300,29 +30232,29 @@ void swap(ISchema &a, ISchema &b) {
   swap(a.__isset, b.__isset);
 }
 
-ISchema::ISchema(const ISchema& other1158) {
-  schemaType = other1158.schemaType;
-  name = other1158.name;
-  catName = other1158.catName;
-  dbName = other1158.dbName;
-  compatibility = other1158.compatibility;
-  validationLevel = other1158.validationLevel;
-  canEvolve = other1158.canEvolve;
-  schemaGroup = other1158.schemaGroup;
-  description = other1158.description;
-  __isset = other1158.__isset;
-}
-ISchema& ISchema::operator=(const ISchema& other1159) {
-  schemaType = other1159.schemaType;
-  name = other1159.name;
-  catName = other1159.catName;
-  dbName = other1159.dbName;
-  compatibility = other1159.compatibility;
-  validationLevel = other1159.validationLevel;
-  canEvolve = other1159.canEvolve;
-  schemaGroup = other1159.schemaGroup;
-  description = other1159.description;
-  __isset = other1159.__isset;
+ISchema::ISchema(const ISchema& other1151) {
+  schemaType = other1151.schemaType;
+  name = other1151.name;
+  catName = other1151.catName;
+  dbName = other1151.dbName;
+  compatibility = other1151.compatibility;
+  validationLevel = other1151.validationLevel;
+  canEvolve = other1151.canEvolve;
+  schemaGroup = other1151.schemaGroup;
+  description = other1151.description;
+  __isset = other1151.__isset;
+}
+ISchema& ISchema::operator=(const ISchema& other1152) {
+  schemaType = other1152.schemaType;
+  name = other1152.name;
+  catName = other1152.catName;
+  dbName = other1152.dbName;
+  compatibility = other1152.compatibility;
+  validationLevel = other1152.validationLevel;
+  canEvolve = other1152.canEvolve;
+  schemaGroup = other1152.schemaGroup;
+  description = other1152.description;
+  __isset = other1152.__isset;
   return *this;
 }
 void ISchema::printTo(std::ostream& out) const {
@@ -30444,17 +30376,17 @@ void swap(ISchemaName &a, ISchemaName &b) {
   swap(a.__isset, b.__isset);
 }
 
-ISchemaName::ISchemaName(const ISchemaName& other1160) {
-  catName = other1160.catName;
-  dbName = other1160.dbName;
-  schemaName = other1160.schemaName;
-  __isset = other1160.__isset;
+ISchemaName::ISchemaName(const ISchemaName& other1153) {
+  catName = other1153.catName;
+  dbName = other1153.dbName;
+  schemaName = other1153.schemaName;
+  __isset = other1153.__isset;
 }
-ISchemaName& ISchemaName::operator=(const ISchemaName& other1161) {
-  catName = other1161.catName;
-  dbName = other1161.dbName;
-  schemaName = other1161.schemaName;
-  __isset = other1161.__isset;
+ISchemaName& ISchemaName::operator=(const ISchemaName& other1154) {
+  catName = other1154.catName;
+  dbName = other1154.dbName;
+  schemaName = other1154.schemaName;
+  __isset = other1154.__isset;
   return *this;
 }
 void ISchemaName::printTo(std::ostream& out) const {
@@ -30553,15 +30485,15 @@ void swap(AlterISchemaRequest &a, AlterISchemaRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1162) {
-  name = other1162.name;
-  newSchema = other1162.newSchema;
-  __isset = other1162.__isset;
+AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1155) {
+  name = other1155.name;
+  newSchema = other1155.newSchema;
+  __isset = other1155.__isset;
 }
-AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1163) {
-  name = other1163.name;
-  newSchema = other1163.newSchema;
-  __isset = other1163.__isset;
+AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1156) {
+  name = other1156.name;
+  newSchema = other1156.newSchema;
+  __isset = other1156.__isset;
   return *this;
 }
 void AlterISchemaRequest::printTo(std::ostream& out) const {
@@ -30672,14 +30604,14 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->cols.clear();
-            uint32_t _size1164;
-            ::apache::thrift::protocol::TType _etype1167;
-            xfer += iprot->readListBegin(_etype1167, _size1164);
-            this->cols.resize(_size1164);
-            uint32_t _i1168;
-            for (_i1168 = 0; _i1168 < _size1164; ++_i1168)
+            uint32_t _size1157;
+            ::apache::thrift::protocol::TType _etype1160;
+            xfer += iprot->readListBegin(_etype1160, _size1157);
+            this->cols.resize(_size1157);
+            uint32_t _i1161;
+            for (_i1161 = 0; _i1161 < _size1157; ++_i1161)
             {
-              xfer += this->cols[_i1168].read(iprot);
+              xfer += this->cols[_i1161].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -30690,9 +30622,9 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 5:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1169;
-          xfer += iprot->readI32(ecast1169);
-          this->state = (SchemaVersionState::type)ecast1169;
+          int32_t ecast1162;
+          xfer += iprot->readI32(ecast1162);
+          this->state = (SchemaVersionState::type)ecast1162;
           this->__isset.state = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -30770,10 +30702,10 @@ uint32_t SchemaVersion::write(::apache::thrift::protocol::TProtocol* oprot) cons
   xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 4);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size()));
-    std::vector<FieldSchema> ::const_iterator _iter1170;
-    for (_iter1170 = this->cols.begin(); _iter1170 != this->cols.end(); ++_iter1170)
+    std::vector<FieldSchema> ::const_iterator _iter1163;
+    for (_iter1163 = this->cols.begin(); _iter1163 != this->cols.end(); ++_iter1163)
     {
-      xfer += (*_iter1170).write(oprot);
+      xfer += (*_iter1163).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -30829,31 +30761,31 @@ void swap(SchemaVersion &a, SchemaVersion &b) {
   swap(a.__isset, b.__isset);
 }
 
-SchemaVersion::SchemaVersion(const SchemaVersion& other1171) {
-  schema = other1171.schema;
-  version = other1171.version;
-  createdAt = other1171.createdAt;
-  cols = other1171.cols;
-  state = other1171.state;
-  description = other1171.description;
-  schemaText = other1171.schemaText;
-  fingerprint = other1171.fingerprint;
-  name = other1171.name;
-  serDe = other1171.serDe;
-  __isset = other1171.__isset;
-}
-SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1172) {
-  schema = other1172.schema;
-  version = other1172.version;
-  createdAt = other1172.createdAt;
-  cols = other1172.cols;
-  state = other1172.state;
-  description = other1172.description;
-  schemaText = other1172.schemaText;
-  fingerprint = other1172.fingerprint;
-  name = other1172.name;
-  serDe = other1172.serDe;
-  __isset = other1172.__isset;
+SchemaVersion::SchemaVersion(const SchemaVersion& other1164) {
+  schema = other1164.schema;
+  version = other1164.version;
+  createdAt = other1164.createdAt;
+  cols = other1164.cols;
+  state = other1164.state;
+  description = other1164.description;
+  schemaText = other1164.schemaText;
+  fingerprint = other1164.fingerprint;
+  name = other1164.name;
+  serDe = other1164.serDe;
+  __isset = other1164.__isset;
+}
+SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1165) {
+  schema = other1165.schema;
+  version = other1165.version;
+  createdAt = other1165.createdAt;
+  cols = other1165.cols;
+  state = other1165.state;
+  description = other1165.description;
+  schemaText = other1165.schemaText;
+  fingerprint = other1165.fingerprint;
+  name = other1165.name;
+  serDe = other1165.serDe;
+  __isset = other1165.__isset;
   return *this;
 }
 void SchemaVersion::printTo(std::ostream& out) const {
@@ -30959,15 +30891,15 @@ void swap(SchemaVersionDescriptor &a, SchemaVersionDescriptor &b) {
   swap(a.__isset, b.__isset);
 }
 
-SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1173) {
-  schema = other1173.schema;
-  version = other1173.version;
-  __isset = other1173.__isset;
+SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1166) {
+  schema = other1166.schema;
+  version = other1166.version;
+  __isset = other1166.__isset;
 }
-SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1174) {
-  schema = other1174.schema;
-  version = other1174.version;
-  __isset = other1174.__isset;
+SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1167) {
+  schema = other1167.schema;
+  version = other1167.version;
+  __isset = other1167.__isset;
   return *this;
 }
 void SchemaVersionDescriptor::printTo(std::ostream& out) const {
@@ -31088,17 +31020,17 @@ void swap(FindSchemasByColsRqst &a, FindSchemasByColsRqst &b) {
   swap(a.__isset, b.__isset);
 }
 
-FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1175) {
-  colName = other1175.colName;
-  colNamespace = other1175.colNamespace;
-  type = other1175.type;
-  __isset = other1175.__isset;
+FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1168) {
+  colName = other1168.colName;
+  colNamespace = other1168.colNamespace;
+  type = other1168.type;
+  __isset = other1168.__isset;
 }
-FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1176) {
-  colName = other1176.colName;
-  colNamespace = other1176.colNamespace;
-  type = other1176.type;
-  __isset = other1176.__isset;
+FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1169) {
+  colName = other1169.colName;
+  colNamespace = other1169.colNamespace;
+  type = other1169.type;
+  __isset = other1169.__isset;
   return *this;
 }
 void FindSchemasByColsRqst::printTo(std::ostream& out) const {
@@ -31144,14 +31076,14 @@ uint32_t FindSchemasByColsResp::read(::apache::thrift::protocol::TProtocol* ipro
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->schemaVersions.clear();
-            uint32_t _size1177;
-            ::apache::thrift::protocol::TType _etype1180;
-            xfer += iprot->readListBegin(_etype1180, _size1177);
-            this->schemaVersions.resize(_size1177);
-            uint32_t _i1181;
-            for (_i1181 = 0; _i1181 < _size1177; ++_i1181)
+            uint32_t _size1170;
+            ::apache::thrift::protocol::TType _etype1173;
+            xfer += iprot->readListBegin(_etype1173, _size1170);
+            this->schemaVersions.resize(_size1170);
+            uint32_t _i1174;
+            for (_i1174 = 0; _i1174 < _size1170; ++_i1174)
             {
-              xfer += this->schemaVersions[_i1181].read(iprot);
+              xfer += this->schemaVersions[_i1174].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -31180,10 +31112,10 @@ uint32_t FindSchemasByColsResp::write(::apache::thrift::protocol::TProtocol* opr
   xfer += oprot->writeFieldBegin("schemaVersions", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->schemaVersions.size()));
-    std::vector<SchemaVersionDescriptor> ::const_iterator _iter1182;
-    for (_iter1182 = this->schemaVersions.begin(); _iter1182 != this->schemaVersions.end(); ++_iter1182)
+    std::vector<SchemaVersionDescriptor> ::const_iterator _iter1175;
+    for (_iter1175 = this->schemaVersions.begin(); _iter1175 != this->schemaVersions.end(); ++_iter1175)
     {
-      xfer += (*_iter1182).write(oprot);
+      xfer += (*_iter1175).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -31200,13 +31132,13 @@ void swap(FindSchemasByColsResp &a, FindSchemasByColsResp &b) {
   swap(a.__isset, b.__isset);
 }
 
-FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1183) {
-  schemaVersions = other1183.schemaVersions;
-  __isset = other1183.__isset;
+FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1176) {
+  schemaVersions = other1176.schemaVersions;
+  __isset = other1176.__isset;
 }
-FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1184) {
-  schemaVersions = other1184.schemaVersions;
-  __isset = other1184.__isset;
+FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1177) {
+  schemaVersions = other1177.schemaVersions;
+  __isset = other1177.__isset;
   return *this;
 }
 void FindSchemasByColsResp::printTo(std::ostream& out) const {
@@ -31303,15 +31235,15 @@ void swap(MapSchemaVersionToSerdeRequest &a, MapSchemaVersionToSerdeRequest &b)
   swap(a.__isset, b.__isset);
 }
 
-MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1185) {
-  schemaVersion = other1185.schemaVersion;
-  serdeName = other1185.serdeName;
-  __isset = other1185.__isset;
+MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1178) {
+  schemaVersion = other1178.schemaVersion;
+  serdeName = other1178.serdeName;
+  __isset = other1178.__isset;
 }
-MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1186) {
-  schemaVersion = other1186.schemaVersion;
-  serdeName = other1186.serdeName;
-  __isset = other1186.__isset;
+MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1179) {
+  schemaVersion = other1179.schemaVersion;
+  serdeName = other1179.serdeName;
+  __isset = other1179.__isset;
   return *this;
 }
 void MapSchemaVersionToSerdeRequest::printTo(std::ostream& out) const {
@@ -31366,9 +31298,9 @@ uint32_t SetSchemaVersionStateRequest::read(::apache::thrift::protocol::TProtoco
         break;
       case 2:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast1187;
-          xfer += iprot->readI32(ecast1187);
-          this->state = (SchemaVersionState::type)ecast1187;
+          int32_t ecast1180;
+          xfer += iprot->readI32(ecast1180);
+          this->state = (SchemaVersionState::type)ecast1180;
           this->__isset.state = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -31411,15 +31343,15 @@ void swap(SetSchemaVersionStateRequest &a, SetSchemaVersionStateRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1188) {
-  schemaVersion = other1188.schemaVersion;
-  state = other1188.state;
-  __isset = other1188.__isset;
+SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1181) {
+  schemaVersion = other1181.schemaVersion;
+  state = other1181.state;
+  __isset = other1181.__isset;
 }
-SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1189) {
-  schemaVersion = other1189.schemaVersion;
-  state = other1189.state;
-  __isset = other1189.__isset;
+SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1182) {
+  schemaVersion = other1182.schemaVersion;
+  state = other1182.state;
+  __isset = other1182.__isset;
   return *this;
 }
 void SetSchemaVersionStateRequest::printTo(std::ostream& out) const {
@@ -31500,13 +31432,13 @@ void swap(GetSerdeRequest &a, GetSerdeRequest &b) {
   swap(a.__isset, b.__isset);
 }
 
-GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1190) {
-  serdeName = other1190.serdeName;
-  __isset = other1190.__isset;
+GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1183) {
+  serdeName = other1183.serdeName;
+  __isset = other1183.__isset;
 }
-GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1191) {
-  serdeName = other1191.serdeName;
-  __isset = other1191.__isset;
+GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1184) {
+  serdeName = other1184.serdeName;
+  __isset = other1184.__isset;
   return *this;
 }
 void GetSerdeRequest::printTo(std::ostream& out) const {
@@ -31628,17 +31560,17 @@ void swap(RuntimeStat &a, RuntimeStat &b) {
   swap(a.__isset, b.__isset);
 }
 
-RuntimeStat::RuntimeStat(const RuntimeStat& other1192) {
-  createTime = other1192.createTime;
-  weight = other1192.weight;
-  payload = other1192.payload;
-  __isset = other1192.__isset;
+RuntimeStat::RuntimeStat(const RuntimeStat& other1185) {
+  createTime = other1185.createTime;
+  weight = other1185.weight;
+  payload = other1185.payload;
+  __isset = other1185.__isset;
 }
-RuntimeStat& RuntimeStat::operator=(const RuntimeStat& other1193) {
-  createTime = other1193.createTime;
-  weight = other1193.weight;
-  payload = other1193.payload;
-  __isset = other1193.__isset;
+RuntimeStat& RuntimeStat::operator=(const RuntimeStat& other1186) {
+  createTime = other1186.createTime;
+  weight = other1186.weight;
+  payload = other1186.payload;
+  __isset = other1186.__isset;
   return *this;
 }
 void RuntimeStat::printTo(std::ostream& out) const {
@@ -31742,13 +31674,13 @@ void swap(GetRuntimeStatsRequest &a, GetRuntimeStatsRequest &b) {
   swap(a.maxCreateTime, b.maxCreateTime);
 }
 
-GetRuntimeStatsRequest::GetRuntimeStatsRequest(const GetRuntimeStatsRequest& other1194) {
-  maxWeight = other1194.maxWeight;
-  maxCreateTime = other1194.maxCreateTime;
+GetRuntimeStatsRequest::GetRuntimeStatsRequest(const GetRuntimeStatsRequest& other1187) {
+  maxWeight = other1187.maxWeight;
+  maxCreateTime = other1187.maxCreateTime;
 }
-GetRuntimeStatsRequest& GetRuntimeStatsRequest::operator=(const GetRuntimeStatsRequest& other1195) {
-  maxWeight = other1195.maxWeight;
-  maxCreateTime = other1195.maxCreateTime;
+GetRuntimeStatsRequest& GetRuntimeStatsRequest::operator=(const GetRuntimeStatsRequest& other1188) {
+  maxWeight = other1188.maxWeight;
+  maxCreateTime = other1188.maxCreateTime;
   return *this;
 }
 void GetRuntimeStatsRequest::printTo(std::ostream& out) const {
@@ -31829,13 +31761,13 @@ void swap(MetaException &a, MetaException &b) {
   swap(a.__isset, b.__isset);
 }
 
-MetaException::MetaException(const MetaException& other1196) : TException() {
-  message = other1196.message;
-  __isset = other1196.__isset;
+MetaException::MetaException(const MetaException& other1189) : TException() {
+  message = other1189.message;
+  __isset = other1189.__isset;
 }
-MetaException& MetaException::operator=(const MetaException& other1197) {
-  message = other1197.message;
-  __isset = other1197.__isset;
+MetaException& MetaException::operator=(const MetaException& other1190) {
+  message = other1190.message;
+  __isset = other1190.__isset;
   return *this;
 }
 void MetaException::printTo(std::ostream& out) const {
@@ -31926,13 +31858,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) {
   swap(a.__isset, b.__isset);
 }
 
-UnknownTableException::UnknownTableException(const UnknownTableException& other1198) : TException() {
-  message = other1198.message;
-  __isset = other1198.__isset;
+UnknownTableException::UnknownTableException(const UnknownTableException& other1191) : TException() {
+

<TRUNCATED>

[11/48] hive git commit: HIVE-20090 : Extend creation of semijoin reduction filters to be able to discover new opportunities (Jesus Camacho Rodriguez via Deepak Jaiswal)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query39.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query39.q.out b/ql/src/test/results/clientpositive/perf/tez/query39.q.out
index 5966e24..514f5d4 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query39.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query39.q.out
@@ -69,23 +69,23 @@ Stage-0
     limit:-1
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_220]
-        Select Operator [SEL_219] (rows=13756683 width=15)
+      File Output Operator [FS_232]
+        Select Operator [SEL_231] (rows=13756683 width=15)
           Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9"]
         <-Reducer 6 [SIMPLE_EDGE]
           SHUFFLE [RS_60]
-            Merge Join Operator [MERGEJOIN_190] (rows=13756683 width=15)
-              Conds:RS_213._col0, _col1=RS_218._col0, _col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
+            Merge Join Operator [MERGEJOIN_202] (rows=13756683 width=15)
+              Conds:RS_225._col0, _col1=RS_230._col0, _col1(Inner),Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7"]
             <-Reducer 11 [SIMPLE_EDGE] vectorized
-              SHUFFLE [RS_218]
+              SHUFFLE [RS_230]
                 PartitionCols:_col0, _col1
-                Select Operator [SEL_217] (rows=12506076 width=15)
+                Select Operator [SEL_229] (rows=12506076 width=15)
                   Output:["_col0","_col1","_col2","_col3"]
-                  Filter Operator [FIL_216] (rows=12506076 width=15)
+                  Filter Operator [FIL_228] (rows=12506076 width=15)
                     predicate:CASE WHEN (((_col3 / _col4) = 0)) THEN (false) ELSE (((power(((_col5 - ((_col6 * _col6) / _col4)) / CASE WHEN ((_col4 = 1L)) THEN (null) ELSE ((_col4 - 1)) END), 0.5) / (_col3 / _col4)) > 1.0D)) END
-                    Select Operator [SEL_215] (rows=25012152 width=15)
+                    Select Operator [SEL_227] (rows=25012152 width=15)
                       Output:["_col0","_col1","_col3","_col4","_col5","_col6"]
-                      Group By Operator [GBY_214] (rows=25012152 width=15)
+                      Group By Operator [GBY_226] (rows=25012152 width=15)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2
                       <-Reducer 10 [SIMPLE_EDGE]
                         SHUFFLE [RS_52]
@@ -94,64 +94,64 @@ Stage-0
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col3)","count(_col3)","sum(_col5)","sum(_col4)"],keys:_col1, _col2, _col0
                             Select Operator [SEL_49] (rows=50024305 width=15)
                               Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Merge Join Operator [MERGEJOIN_189] (rows=50024305 width=15)
-                                Conds:RS_46._col2=RS_208._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
+                              Merge Join Operator [MERGEJOIN_201] (rows=50024305 width=15)
+                                Conds:RS_46._col2=RS_220._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
                               <-Map 14 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_208]
+                                SHUFFLE [RS_220]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_206] (rows=27 width=1029)
+                                  Select Operator [SEL_218] (rows=27 width=1029)
                                     Output:["_col0","_col1"]
-                                    Filter Operator [FIL_205] (rows=27 width=1029)
+                                    Filter Operator [FIL_217] (rows=27 width=1029)
                                       predicate:w_warehouse_sk is not null
                                       TableScan [TS_9] (rows=27 width=1029)
                                         default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_warehouse_name"]
                               <-Reducer 9 [SIMPLE_EDGE]
                                 SHUFFLE [RS_46]
                                   PartitionCols:_col2
-                                  Merge Join Operator [MERGEJOIN_188] (rows=45476640 width=15)
-                                    Conds:RS_43._col1=RS_204._col0(Inner),Output:["_col2","_col3","_col7"]
+                                  Merge Join Operator [MERGEJOIN_200] (rows=45476640 width=15)
+                                    Conds:RS_43._col1=RS_216._col0(Inner),Output:["_col2","_col3","_col7"]
                                   <-Map 13 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_204]
+                                    SHUFFLE [RS_216]
                                       PartitionCols:_col0
-                                      Select Operator [SEL_202] (rows=462000 width=1436)
+                                      Select Operator [SEL_214] (rows=462000 width=1436)
                                         Output:["_col0"]
-                                        Filter Operator [FIL_201] (rows=462000 width=1436)
+                                        Filter Operator [FIL_213] (rows=462000 width=1436)
                                           predicate:i_item_sk is not null
                                           TableScan [TS_6] (rows=462000 width=1436)
                                             default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk"]
                                   <-Reducer 8 [SIMPLE_EDGE]
                                     SHUFFLE [RS_43]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_187] (rows=41342400 width=15)
-                                        Conds:RS_194._col0=RS_200._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      Merge Join Operator [MERGEJOIN_199] (rows=41342400 width=15)
+                                        Conds:RS_206._col0=RS_212._col0(Inner),Output:["_col1","_col2","_col3"]
                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_194]
+                                        SHUFFLE [RS_206]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_192] (rows=37584000 width=15)
+                                          Select Operator [SEL_204] (rows=37584000 width=15)
                                             Output:["_col0","_col1","_col2","_col3"]
-                                            Filter Operator [FIL_191] (rows=37584000 width=15)
+                                            Filter Operator [FIL_203] (rows=37584000 width=15)
                                               predicate:(inv_date_sk is not null and inv_item_sk is not null and inv_warehouse_sk is not null)
                                               TableScan [TS_0] (rows=37584000 width=15)
                                                 default@inventory,inventory,Tbl:COMPLETE,Col:NONE,Output:["inv_date_sk","inv_item_sk","inv_warehouse_sk","inv_quantity_on_hand"]
                                       <-Map 12 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_200]
+                                        SHUFFLE [RS_212]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_198] (rows=18262 width=1119)
+                                          Select Operator [SEL_210] (rows=18262 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_196] (rows=18262 width=1119)
+                                            Filter Operator [FIL_208] (rows=18262 width=1119)
                                               predicate:((d_moy = 5) and (d_year = 1999) and d_date_sk is not null)
                                               TableScan [TS_3] (rows=73049 width=1119)
                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
             <-Reducer 5 [SIMPLE_EDGE] vectorized
-              SHUFFLE [RS_213]
+              SHUFFLE [RS_225]
                 PartitionCols:_col0, _col1
-                Select Operator [SEL_212] (rows=12506076 width=15)
+                Select Operator [SEL_224] (rows=12506076 width=15)
                   Output:["_col0","_col1","_col2","_col3"]
-                  Filter Operator [FIL_211] (rows=12506076 width=15)
+                  Filter Operator [FIL_223] (rows=12506076 width=15)
                     predicate:CASE WHEN (((_col3 / _col4) = 0)) THEN (false) ELSE (((power(((_col5 - ((_col6 * _col6) / _col4)) / CASE WHEN ((_col4 = 1L)) THEN (null) ELSE ((_col4 - 1)) END), 0.5) / (_col3 / _col4)) > 1.0D)) END
-                    Select Operator [SEL_210] (rows=25012152 width=15)
+                    Select Operator [SEL_222] (rows=25012152 width=15)
                       Output:["_col0","_col1","_col3","_col4","_col5","_col6"]
-                      Group By Operator [GBY_209] (rows=25012152 width=15)
+                      Group By Operator [GBY_221] (rows=25012152 width=15)
                         Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(VALUE._col0)","count(VALUE._col1)","sum(VALUE._col2)","sum(VALUE._col3)"],keys:KEY._col0, KEY._col1, KEY._col2
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]
@@ -160,36 +160,36 @@ Stage-0
                             Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6"],aggregations:["sum(_col3)","count(_col3)","sum(_col5)","sum(_col4)"],keys:_col1, _col2, _col0
                             Select Operator [SEL_21] (rows=50024305 width=15)
                               Output:["_col0","_col1","_col2","_col3","_col4","_col5"]
-                              Merge Join Operator [MERGEJOIN_186] (rows=50024305 width=15)
-                                Conds:RS_18._col2=RS_207._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
+                              Merge Join Operator [MERGEJOIN_198] (rows=50024305 width=15)
+                                Conds:RS_18._col2=RS_219._col0(Inner),Output:["_col3","_col7","_col8","_col9"]
                               <-Map 14 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_207]
+                                SHUFFLE [RS_219]
                                   PartitionCols:_col0
-                                   Please refer to the previous Select Operator [SEL_206]
+                                   Please refer to the previous Select Operator [SEL_218]
                               <-Reducer 3 [SIMPLE_EDGE]
                                 SHUFFLE [RS_18]
                                   PartitionCols:_col2
-                                  Merge Join Operator [MERGEJOIN_185] (rows=45476640 width=15)
-                                    Conds:RS_15._col1=RS_203._col0(Inner),Output:["_col2","_col3","_col7"]
+                                  Merge Join Operator [MERGEJOIN_197] (rows=45476640 width=15)
+                                    Conds:RS_15._col1=RS_215._col0(Inner),Output:["_col2","_col3","_col7"]
                                   <-Map 13 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_203]
+                                    SHUFFLE [RS_215]
                                       PartitionCols:_col0
-                                       Please refer to the previous Select Operator [SEL_202]
+                                       Please refer to the previous Select Operator [SEL_214]
                                   <-Reducer 2 [SIMPLE_EDGE]
                                     SHUFFLE [RS_15]
                                       PartitionCols:_col1
-                                      Merge Join Operator [MERGEJOIN_184] (rows=41342400 width=15)
-                                        Conds:RS_193._col0=RS_199._col0(Inner),Output:["_col1","_col2","_col3"]
+                                      Merge Join Operator [MERGEJOIN_196] (rows=41342400 width=15)
+                                        Conds:RS_205._col0=RS_211._col0(Inner),Output:["_col1","_col2","_col3"]
                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_193]
+                                        SHUFFLE [RS_205]
                                           PartitionCols:_col0
-                                           Please refer to the previous Select Operator [SEL_192]
+                                           Please refer to the previous Select Operator [SEL_204]
                                       <-Map 12 [SIMPLE_EDGE] vectorized
-                                        SHUFFLE [RS_199]
+                                        SHUFFLE [RS_211]
                                           PartitionCols:_col0
-                                          Select Operator [SEL_197] (rows=18262 width=1119)
+                                          Select Operator [SEL_209] (rows=18262 width=1119)
                                             Output:["_col0"]
-                                            Filter Operator [FIL_195] (rows=18262 width=1119)
+                                            Filter Operator [FIL_207] (rows=18262 width=1119)
                                               predicate:((d_moy = 4) and (d_year = 1999) and d_date_sk is not null)
                                                Please refer to the previous TableScan [TS_3]
 

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query40.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query40.q.out b/ql/src/test/results/clientpositive/perf/tez/query40.q.out
index 2f116f1..9920ad3 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query40.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query40.q.out
@@ -71,14 +71,14 @@ Stage-0
     limit:100
     Stage-1
       Reducer 7 vectorized
-      File Output Operator [FS_134]
-        Limit [LIM_133] (rows=100 width=135)
+      File Output Operator [FS_135]
+        Limit [LIM_134] (rows=100 width=135)
           Number of rows:100
-          Select Operator [SEL_132] (rows=210822976 width=135)
+          Select Operator [SEL_133] (rows=210822976 width=135)
             Output:["_col0","_col1","_col2","_col3"]
           <-Reducer 6 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_131]
-              Group By Operator [GBY_130] (rows=210822976 width=135)
+            SHUFFLE [RS_132]
+              Group By Operator [GBY_131] (rows=210822976 width=135)
                 Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(VALUE._col0)","sum(VALUE._col1)"],keys:KEY._col0, KEY._col1
               <-Reducer 5 [SIMPLE_EDGE]
                 SHUFFLE [RS_30]
@@ -87,98 +87,98 @@ Stage-0
                     Output:["_col0","_col1","_col2","_col3"],aggregations:["sum(_col2)","sum(_col3)"],keys:_col0, _col1
                     Select Operator [SEL_27] (rows=421645953 width=135)
                       Output:["_col0","_col1","_col2","_col3"]
-                      Merge Join Operator [MERGEJOIN_99] (rows=421645953 width=135)
-                        Conds:RS_24._col1=RS_118._col0(Inner),Output:["_col4","_col7","_col9","_col11","_col14"]
+                      Merge Join Operator [MERGEJOIN_100] (rows=421645953 width=135)
+                        Conds:RS_24._col1=RS_119._col0(Inner),Output:["_col4","_col7","_col9","_col11","_col14"]
                       <-Map 13 [SIMPLE_EDGE] vectorized
-                        SHUFFLE [RS_118]
+                        SHUFFLE [RS_119]
                           PartitionCols:_col0
-                          Select Operator [SEL_117] (rows=27 width=1029)
+                          Select Operator [SEL_118] (rows=27 width=1029)
                             Output:["_col0","_col1"]
-                            Filter Operator [FIL_116] (rows=27 width=1029)
+                            Filter Operator [FIL_117] (rows=27 width=1029)
                               predicate:w_warehouse_sk is not null
                               TableScan [TS_12] (rows=27 width=1029)
                                 default@warehouse,warehouse,Tbl:COMPLETE,Col:NONE,Output:["w_warehouse_sk","w_state"]
                       <-Reducer 4 [SIMPLE_EDGE]
                         SHUFFLE [RS_24]
                           PartitionCols:_col1
-                          Merge Join Operator [MERGEJOIN_98] (rows=383314495 width=135)
-                            Conds:RS_21._col2=RS_110._col0(Inner),Output:["_col1","_col4","_col7","_col9","_col11"]
+                          Merge Join Operator [MERGEJOIN_99] (rows=383314495 width=135)
+                            Conds:RS_21._col2=RS_111._col0(Inner),Output:["_col1","_col4","_col7","_col9","_col11"]
                           <-Map 11 [SIMPLE_EDGE] vectorized
-                            SHUFFLE [RS_110]
+                            SHUFFLE [RS_111]
                               PartitionCols:_col0
-                              Select Operator [SEL_109] (rows=51333 width=1436)
+                              Select Operator [SEL_110] (rows=51333 width=1436)
                                 Output:["_col0","_col1"]
-                                Filter Operator [FIL_108] (rows=51333 width=1436)
+                                Filter Operator [FIL_109] (rows=51333 width=1436)
                                   predicate:(i_current_price BETWEEN 0.99 AND 1.49 and i_item_sk is not null)
                                   TableScan [TS_9] (rows=462000 width=1436)
                                     default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_item_id","i_current_price"]
                           <-Reducer 3 [SIMPLE_EDGE]
                             SHUFFLE [RS_21]
                               PartitionCols:_col2
-                              Merge Join Operator [MERGEJOIN_97] (rows=348467716 width=135)
-                                Conds:RS_18._col0=RS_102._col0(Inner),Output:["_col1","_col2","_col4","_col7","_col9"]
+                              Merge Join Operator [MERGEJOIN_98] (rows=348467716 width=135)
+                                Conds:RS_18._col0=RS_103._col0(Inner),Output:["_col1","_col2","_col4","_col7","_col9"]
                               <-Map 9 [SIMPLE_EDGE] vectorized
-                                SHUFFLE [RS_102]
+                                SHUFFLE [RS_103]
                                   PartitionCols:_col0
-                                  Select Operator [SEL_101] (rows=8116 width=1119)
+                                  Select Operator [SEL_102] (rows=8116 width=1119)
                                     Output:["_col0","_col1"]
-                                    Filter Operator [FIL_100] (rows=8116 width=1119)
+                                    Filter Operator [FIL_101] (rows=8116 width=1119)
                                       predicate:(CAST( d_date AS TIMESTAMP) BETWEEN TIMESTAMP'1998-03-09 00:00:00' AND TIMESTAMP'1998-05-08 00:00:00' and d_date_sk is not null)
                                       TableScan [TS_6] (rows=73049 width=1119)
                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_date"]
                               <-Reducer 2 [SIMPLE_EDGE]
                                 SHUFFLE [RS_18]
                                   PartitionCols:_col0
-                                  Merge Join Operator [MERGEJOIN_96] (rows=316788826 width=135)
-                                    Conds:RS_126._col2, _col3=RS_129._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col7"]
+                                  Merge Join Operator [MERGEJOIN_97] (rows=316788826 width=135)
+                                    Conds:RS_127._col2, _col3=RS_130._col0, _col1(Left Outer),Output:["_col0","_col1","_col2","_col4","_col7"]
                                   <-Map 1 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_126]
+                                    SHUFFLE [RS_127]
                                       PartitionCols:_col2, _col3
-                                      Select Operator [SEL_125] (rows=287989836 width=135)
+                                      Select Operator [SEL_126] (rows=287989836 width=135)
                                         Output:["_col0","_col1","_col2","_col3","_col4"]
-                                        Filter Operator [FIL_124] (rows=287989836 width=135)
+                                        Filter Operator [FIL_125] (rows=287989836 width=135)
                                           predicate:((cs_item_sk BETWEEN DynamicValue(RS_22_item_i_item_sk_min) AND DynamicValue(RS_22_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_22_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_19_date_dim_d_date_sk_min) AND DynamicValue(RS_19_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_19_date_dim_d_date_sk_bloom_filter))) and (cs_warehouse_sk BETWEEN DynamicValue(RS_25_warehouse_w_warehouse_sk_min) AND DynamicValue(RS_25_warehouse_w_warehouse_sk_max) and in_bloom_filter(cs_warehouse_sk, DynamicValue(RS_25_warehouse_w_warehouse_sk_bloom_filter))) and cs_item_sk is not null and cs_sold_date_sk is not null and cs_warehouse_sk is not null)
                                           TableScan [TS_0] (rows=287989836 width=135)
                                             default@catalog_sales,catalog_sales,Tbl:COMPLETE,Col:NONE,Output:["cs_sold_date_sk","cs_warehouse_sk","cs_item_sk","cs_order_number","cs_sales_price"]
                                           <-Reducer 10 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_107]
-                                              Group By Operator [GBY_106] (rows=1 width=12)
+                                            BROADCAST [RS_108]
+                                              Group By Operator [GBY_107] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                               <-Map 9 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_105]
-                                                  Group By Operator [GBY_104] (rows=1 width=12)
+                                                SHUFFLE [RS_106]
+                                                  Group By Operator [GBY_105] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_103] (rows=8116 width=1119)
+                                                    Select Operator [SEL_104] (rows=8116 width=1119)
                                                       Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_101]
+                                                       Please refer to the previous Select Operator [SEL_102]
                                           <-Reducer 12 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_115]
-                                              Group By Operator [GBY_114] (rows=1 width=12)
+                                            BROADCAST [RS_116]
+                                              Group By Operator [GBY_115] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                               <-Map 11 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_113]
-                                                  Group By Operator [GBY_112] (rows=1 width=12)
+                                                SHUFFLE [RS_114]
+                                                  Group By Operator [GBY_113] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_111] (rows=51333 width=1436)
+                                                    Select Operator [SEL_112] (rows=51333 width=1436)
                                                       Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_109]
+                                                       Please refer to the previous Select Operator [SEL_110]
                                           <-Reducer 14 [BROADCAST_EDGE] vectorized
-                                            BROADCAST [RS_123]
-                                              Group By Operator [GBY_122] (rows=1 width=12)
+                                            BROADCAST [RS_124]
+                                              Group By Operator [GBY_123] (rows=1 width=12)
                                                 Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                               <-Map 13 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_121]
-                                                  Group By Operator [GBY_120] (rows=1 width=12)
+                                                SHUFFLE [RS_122]
+                                                  Group By Operator [GBY_121] (rows=1 width=12)
                                                     Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                    Select Operator [SEL_119] (rows=27 width=1029)
+                                                    Select Operator [SEL_120] (rows=27 width=1029)
                                                       Output:["_col0"]
-                                                       Please refer to the previous Select Operator [SEL_117]
+                                                       Please refer to the previous Select Operator [SEL_118]
                                   <-Map 8 [SIMPLE_EDGE] vectorized
-                                    SHUFFLE [RS_129]
+                                    SHUFFLE [RS_130]
                                       PartitionCols:_col0, _col1
-                                      Select Operator [SEL_128] (rows=28798881 width=106)
+                                      Select Operator [SEL_129] (rows=28798881 width=106)
                                         Output:["_col0","_col1","_col2"]
-                                        Filter Operator [FIL_127] (rows=28798881 width=106)
+                                        Filter Operator [FIL_128] (rows=28798881 width=106)
                                           predicate:cr_item_sk is not null
                                           TableScan [TS_3] (rows=28798881 width=106)
                                             default@catalog_returns,catalog_returns,Tbl:COMPLETE,Col:NONE,Output:["cr_item_sk","cr_order_number","cr_refunded_cash"]

http://git-wip-us.apache.org/repos/asf/hive/blob/ab9e954d/ql/src/test/results/clientpositive/perf/tez/query54.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/perf/tez/query54.q.out b/ql/src/test/results/clientpositive/perf/tez/query54.q.out
index 8ab239c..3e3c607 100644
--- a/ql/src/test/results/clientpositive/perf/tez/query54.q.out
+++ b/ql/src/test/results/clientpositive/perf/tez/query54.q.out
@@ -1,7 +1,7 @@
-Warning: Shuffle Join MERGEJOIN[269][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product
-Warning: Shuffle Join MERGEJOIN[270][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 5' is a cross product
-Warning: Shuffle Join MERGEJOIN[268][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 33' is a cross product
-Warning: Shuffle Join MERGEJOIN[271][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 6' is a cross product
+Warning: Shuffle Join MERGEJOIN[271][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product
+Warning: Shuffle Join MERGEJOIN[272][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 5' is a cross product
+Warning: Shuffle Join MERGEJOIN[270][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 33' is a cross product
+Warning: Shuffle Join MERGEJOIN[273][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 6' is a cross product
 PREHOOK: query: explain
 with my_customers as (
  select distinct c_customer_sk
@@ -148,25 +148,25 @@ Stage-0
     limit:100
     Stage-1
       Reducer 9 vectorized
-      File Output Operator [FS_358]
-        Limit [LIM_357] (rows=100 width=158)
+      File Output Operator [FS_360]
+        Limit [LIM_359] (rows=100 width=158)
           Number of rows:100
-          Select Operator [SEL_356] (rows=1614130953450400 width=158)
+          Select Operator [SEL_358] (rows=1614130953450400 width=158)
             Output:["_col0","_col1","_col2"]
           <-Reducer 8 [SIMPLE_EDGE] vectorized
-            SHUFFLE [RS_355]
-              Select Operator [SEL_354] (rows=1614130953450400 width=158)
+            SHUFFLE [RS_357]
+              Select Operator [SEL_356] (rows=1614130953450400 width=158)
                 Output:["_col0","_col1","_col2"]
-                Group By Operator [GBY_353] (rows=1614130953450400 width=158)
+                Group By Operator [GBY_355] (rows=1614130953450400 width=158)
                   Output:["_col0","_col1"],aggregations:["count(VALUE._col0)"],keys:KEY._col0
                 <-Reducer 7 [SIMPLE_EDGE] vectorized
-                  SHUFFLE [RS_352]
+                  SHUFFLE [RS_354]
                     PartitionCols:_col0
-                    Group By Operator [GBY_351] (rows=3228261906900801 width=158)
+                    Group By Operator [GBY_353] (rows=3228261906900801 width=158)
                       Output:["_col0","_col1"],aggregations:["count()"],keys:_col0
-                      Select Operator [SEL_350] (rows=3228261906900801 width=158)
+                      Select Operator [SEL_352] (rows=3228261906900801 width=158)
                         Output:["_col0"]
-                        Group By Operator [GBY_349] (rows=3228261906900801 width=158)
+                        Group By Operator [GBY_351] (rows=3228261906900801 width=158)
                           Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0
                         <-Reducer 6 [SIMPLE_EDGE]
                           SHUFFLE [RS_119]
@@ -179,257 +179,257 @@ Stage-0
                                   predicate:_col11 BETWEEN _col13 AND _col15
                                   Select Operator [SEL_115] (rows=58108714324214428 width=158)
                                     Output:["_col0","_col4","_col11","_col13","_col15"]
-                                    Merge Join Operator [MERGEJOIN_271] (rows=58108714324214428 width=158)
+                                    Merge Join Operator [MERGEJOIN_273] (rows=58108714324214428 width=158)
                                       Conds:(Inner),Output:["_col0","_col2","_col6","_col13","_col15"]
                                     <-Reducer 33 [CUSTOM_SIMPLE_EDGE]
                                       PARTITION_ONLY_SHUFFLE [RS_112]
-                                        Merge Join Operator [MERGEJOIN_268] (rows=9131 width=1128)
+                                        Merge Join Operator [MERGEJOIN_270] (rows=9131 width=1128)
                                           Conds:(Right Outer),Output:["_col0"]
                                         <-Reducer 32 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          PARTITION_ONLY_SHUFFLE [RS_340]
-                                            Group By Operator [GBY_339] (rows=9131 width=1119)
+                                          PARTITION_ONLY_SHUFFLE [RS_342]
+                                            Group By Operator [GBY_341] (rows=9131 width=1119)
                                               Output:["_col0"],keys:KEY._col0
                                             <-Map 29 [SIMPLE_EDGE] vectorized
-                                              SHUFFLE [RS_328]
+                                              SHUFFLE [RS_330]
                                                 PartitionCols:_col0
-                                                Group By Operator [GBY_325] (rows=18262 width=1119)
+                                                Group By Operator [GBY_327] (rows=18262 width=1119)
                                                   Output:["_col0"],keys:_col0
-                                                  Select Operator [SEL_322] (rows=18262 width=1119)
+                                                  Select Operator [SEL_324] (rows=18262 width=1119)
                                                     Output:["_col0"]
-                                                    Filter Operator [FIL_320] (rows=18262 width=1119)
+                                                    Filter Operator [FIL_322] (rows=18262 width=1119)
                                                       predicate:((d_moy = 3) and (d_year = 1999))
                                                       TableScan [TS_73] (rows=73049 width=1119)
                                                         default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_month_seq","d_year","d_moy"]
                                         <-Reducer 35 [CUSTOM_SIMPLE_EDGE] vectorized
-                                          PARTITION_ONLY_SHUFFLE [RS_348]
-                                            Select Operator [SEL_347] (rows=1 width=8)
-                                              Filter Operator [FIL_346] (rows=1 width=8)
+                                          PARTITION_ONLY_SHUFFLE [RS_350]
+                                            Select Operator [SEL_349] (rows=1 width=8)
+                                              Filter Operator [FIL_348] (rows=1 width=8)
                                                 predicate:(sq_count_check(_col0) <= 1)
-                                                Group By Operator [GBY_345] (rows=1 width=8)
+                                                Group By Operator [GBY_347] (rows=1 width=8)
                                                   Output:["_col0"],aggregations:["count(VALUE._col0)"]
                                                 <-Reducer 34 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                  PARTITION_ONLY_SHUFFLE [RS_344]
-                                                    Group By Operator [GBY_343] (rows=1 width=8)
+                                                  PARTITION_ONLY_SHUFFLE [RS_346]
+                                                    Group By Operator [GBY_345] (rows=1 width=8)
                                                       Output:["_col0"],aggregations:["count()"]
-                                                      Select Operator [SEL_342] (rows=9131 width=1119)
-                                                        Group By Operator [GBY_341] (rows=9131 width=1119)
+                                                      Select Operator [SEL_344] (rows=9131 width=1119)
+                                                        Group By Operator [GBY_343] (rows=9131 width=1119)
                                                           Output:["_col0"],keys:KEY._col0
                                                         <-Map 29 [SIMPLE_EDGE] vectorized
-                                                          SHUFFLE [RS_329]
+                                                          SHUFFLE [RS_331]
                                                             PartitionCols:_col0
-                                                            Group By Operator [GBY_326] (rows=18262 width=1119)
+                                                            Group By Operator [GBY_328] (rows=18262 width=1119)
                                                               Output:["_col0"],keys:_col0
-                                                              Select Operator [SEL_323] (rows=18262 width=1119)
+                                                              Select Operator [SEL_325] (rows=18262 width=1119)
                                                                 Output:["_col0"]
-                                                                 Please refer to the previous Filter Operator [FIL_320]
+                                                                 Please refer to the previous Filter Operator [FIL_322]
                                     <-Reducer 5 [CUSTOM_SIMPLE_EDGE]
                                       PARTITION_ONLY_SHUFFLE [RS_113]
                                         Select Operator [SEL_108] (rows=6363893803988 width=1217)
                                           Output:["_col0","_col4","_col11","_col13"]
-                                          Merge Join Operator [MERGEJOIN_270] (rows=6363893803988 width=1217)
+                                          Merge Join Operator [MERGEJOIN_272] (rows=6363893803988 width=1217)
                                             Conds:(Left Outer),Output:["_col2","_col4","_col10","_col13"]
                                           <-Reducer 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                            PARTITION_ONLY_SHUFFLE [RS_332]
-                                              Group By Operator [GBY_330] (rows=9131 width=1119)
+                                            PARTITION_ONLY_SHUFFLE [RS_334]
+                                              Group By Operator [GBY_332] (rows=9131 width=1119)
                                                 Output:["_col0"],keys:KEY._col0
                                               <-Map 29 [SIMPLE_EDGE] vectorized
-                                                SHUFFLE [RS_327]
+                                                SHUFFLE [RS_329]
                                                   PartitionCols:_col0
-                                                  Group By Operator [GBY_324] (rows=18262 width=1119)
+                                                  Group By Operator [GBY_326] (rows=18262 width=1119)
                                                     Output:["_col0"],keys:_col0
-                                                    Select Operator [SEL_321] (rows=18262 width=1119)
+                                                    Select Operator [SEL_323] (rows=18262 width=1119)
                                                       Output:["_col0"]
-                                                       Please refer to the previous Filter Operator [FIL_320]
+                                                       Please refer to the previous Filter Operator [FIL_322]
                                           <-Reducer 4 [CUSTOM_SIMPLE_EDGE]
                                             PARTITION_ONLY_SHUFFLE [RS_105]
-                                              Merge Join Operator [MERGEJOIN_269] (rows=696954748 width=97)
+                                              Merge Join Operator [MERGEJOIN_271] (rows=696954748 width=97)
                                                 Conds:(Inner),Output:["_col2","_col4","_col10"]
                                               <-Reducer 3 [CUSTOM_SIMPLE_EDGE]
                                                 PARTITION_ONLY_SHUFFLE [RS_102]
-                                                  Merge Join Operator [MERGEJOIN_267] (rows=696954748 width=88)
+                                                  Merge Join Operator [MERGEJOIN_269] (rows=696954748 width=88)
                                                     Conds:RS_99._col1=RS_100._col5(Inner),Output:["_col2","_col4","_col10"]
                                                   <-Reducer 14 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_100]
                                                       PartitionCols:_col5
-                                                      Merge Join Operator [MERGEJOIN_266] (rows=316240138 width=135)
-                                                        Conds:RS_69._col0=RS_319._col1(Inner),Output:["_col5"]
+                                                      Merge Join Operator [MERGEJOIN_268] (rows=316240138 width=135)
+                                                        Conds:RS_69._col0=RS_321._col1(Inner),Output:["_col5"]
                                                       <-Reducer 13 [SIMPLE_EDGE]
                                                         SHUFFLE [RS_69]
                                                           PartitionCols:_col0
-                                                          Merge Join Operator [MERGEJOIN_262] (rows=44000000 width=1014)
-                                                            Conds:RS_295._col1, _col2=RS_298._col0, _col1(Inner),Output:["_col0"]
+                                                          Merge Join Operator [MERGEJOIN_264] (rows=44000000 width=1014)
+                                                            Conds:RS_297._col1, _col2=RS_300._col0, _col1(Inner),Output:["_col0"]
                                                           <-Map 12 [SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_295]
+                                                            SHUFFLE [RS_297]
                                                               PartitionCols:_col1, _col2
-                                                              Select Operator [SEL_294] (rows=40000000 width=1014)
+                                                              Select Operator [SEL_296] (rows=40000000 width=1014)
                                                                 Output:["_col0","_col1","_col2"]
-                                                                Filter Operator [FIL_293] (rows=40000000 width=1014)
+                                                                Filter Operator [FIL_295] (rows=40000000 width=1014)
                                                                   predicate:(ca_address_sk is not null and ca_county is not null and ca_state is not null)
                                                                   TableScan [TS_29] (rows=40000000 width=1014)
                                                                     default@customer_address,customer_address,Tbl:COMPLETE,Col:NONE,Output:["ca_address_sk","ca_county","ca_state"]
                                                           <-Map 15 [SIMPLE_EDGE] vectorized
-                                                            SHUFFLE [RS_298]
+                                                            SHUFFLE [RS_300]
                                                               PartitionCols:_col0, _col1
-                                                              Select Operator [SEL_297] (rows=1704 width=1910)
+                                                              Select Operator [SEL_299] (rows=1704 width=1910)
                                                                 Output:["_col0","_col1"]
-                                                                Filter Operator [FIL_296] (rows=1704 width=1910)
+                                                                Filter Operator [FIL_298] (rows=1704 width=1910)
                                                                   predicate:(s_county is not null and s_state is not null)
                                                                   TableScan [TS_32] (rows=1704 width=1910)
                                                                     default@store,store,Tbl:COMPLETE,Col:NONE,Output:["s_county","s_state"]
                                                       <-Reducer 21 [SIMPLE_EDGE] vectorized
-                                                        SHUFFLE [RS_319]
+                                                        SHUFFLE [RS_321]
                                                           PartitionCols:_col1
-                                                          Select Operator [SEL_318] (rows=287491029 width=135)
+                                                          Select Operator [SEL_320] (rows=287491029 width=135)
                                                             Output:["_col0","_col1"]
-                                                            Group By Operator [GBY_317] (rows=287491029 width=135)
+                                                            Group By Operator [GBY_319] (rows=287491029 width=135)
                                                               Output:["_col0","_col1"],keys:KEY._col0, KEY._col1
                                                             <-Reducer 20 [SIMPLE_EDGE]
                                                               SHUFFLE [RS_63]
                                                                 PartitionCols:_col0, _col1
                                                                 Group By Operator [GBY_62] (rows=574982058 width=135)
                                                                   Output:["_col0","_col1"],keys:_col10, _col9
-                                                                  Merge Join Operator [MERGEJOIN_265] (rows=574982058 width=135)
-                                                                    Conds:RS_58._col1=RS_313._col0(Inner),Output:["_col9","_col10"]
+                                                                  Merge Join Operator [MERGEJOIN_267] (rows=574982058 width=135)
+                                                                    Conds:RS_58._col1=RS_315._col0(Inner),Output:["_col9","_col10"]
                                                                   <-Map 27 [SIMPLE_EDGE] vectorized
-                                                                    PARTITION_ONLY_SHUFFLE [RS_313]
+                                                                    PARTITION_ONLY_SHUFFLE [RS_315]
                                                                       PartitionCols:_col0
-                                                                      Select Operator [SEL_312] (rows=80000000 width=860)
+                                                                      Select Operator [SEL_314] (rows=80000000 width=860)
                                                                         Output:["_col0","_col1"]
-                                                                        Filter Operator [FIL_311] (rows=80000000 width=860)
+                                                                        Filter Operator [FIL_313] (rows=80000000 width=860)
                                                                           predicate:(c_current_addr_sk is not null and c_customer_sk is not null)
                                                                           TableScan [TS_49] (rows=80000000 width=860)
                                                                             default@customer,customer,Tbl:COMPLETE,Col:NONE,Output:["c_customer_sk","c_current_addr_sk"]
                                                                   <-Reducer 19 [SIMPLE_EDGE]
                                                                     SHUFFLE [RS_58]
                                                                       PartitionCols:_col1
-                                                                      Merge Join Operator [MERGEJOIN_264] (rows=522710951 width=135)
-                                                                        Conds:RS_55._col2=RS_307._col0(Inner),Output:["_col1"]
+                                                                      Merge Join Operator [MERGEJOIN_266] (rows=522710951 width=135)
+                                                                        Conds:RS_55._col2=RS_309._col0(Inner),Output:["_col1"]
                                                                       <-Map 25 [SIMPLE_EDGE] vectorized
-                                                                        PARTITION_ONLY_SHUFFLE [RS_307]
+                                                                        PARTITION_ONLY_SHUFFLE [RS_309]
                                                                           PartitionCols:_col0
-                                                                          Select Operator [SEL_306] (rows=115500 width=1436)
+                                                                          Select Operator [SEL_308] (rows=115500 width=1436)
                                                                             Output:["_col0"]
-                                                                            Filter Operator [FIL_305] (rows=115500 width=1436)
+                                                                            Filter Operator [FIL_307] (rows=115500 width=1436)
                                                                               predicate:((i_category = 'Jewelry') and (i_class = 'consignment') and i_item_sk is not null)
                                                                               TableScan [TS_46] (rows=462000 width=1436)
                                                                                 default@item,item,Tbl:COMPLETE,Col:NONE,Output:["i_item_sk","i_class","i_category"]
                                                                       <-Reducer 18 [SIMPLE_EDGE]
                                                                         SHUFFLE [RS_55]
                                                                           PartitionCols:_col2
-                                                                          Merge Join Operator [MERGEJOIN_263] (rows=475191764 width=135)
-                                                                            Conds:Union 17._col0=RS_301._col0(Inner),Output:["_col1","_col2"]
+                                                                          Merge Join Operator [MERGEJOIN_265] (rows=475191764 width=135)
+                                                                            Conds:Union 17._col0=RS_303._col0(Inner),Output:["_col1","_col2"]
                                                                           <-Map 23 [SIMPLE_EDGE] vectorized
-                                                                            PARTITION_ONLY_SHUFFLE [RS_301]
+                                                                            PARTITION_ONLY_SHUFFLE [RS_303]
                                                                               PartitionCols:_col0
-                                                                              Select Operator [SEL_300] (rows=18262 width=1119)
+                                                                              Select Operator [SEL_302] (rows=18262 width=1119)
                                                                                 Output:["_col0"]
-                                                                                Filter Operator [FIL_299] (rows=18262 width=1119)
+                                                                                Filter Operator [FIL_301] (rows=18262 width=1119)
                                                                                   predicate:((d_moy = 3) and (d_year = 1999) and d_date_sk is not null)
                                                                                   TableScan [TS_43] (rows=73049 width=1119)
                                                                                     default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_year","d_moy"]
                                                                           <-Union 17 [SIMPLE_EDGE]
                                                                             <-Map 16 [CONTAINS] vectorized
-                                                                              Reduce Output Operator [RS_369]
+                                                                              Reduce Output Operator [RS_371]
                                                                                 PartitionCols:_col0
-                                                                                Select Operator [SEL_368] (rows=287989836 width=135)
+                                                                                Select Operator [SEL_370] (rows=287989836 width=135)
                                                                                   Output:["_col0","_col1","_col2"]
-                                                                                  Filter Operator [FIL_367] (rows=287989836 width=135)
+                                                                                  Filter Operator [FIL_369] (rows=287989836 width=135)
                                                                                     predicate:((cs_bill_customer_sk BETWEEN DynamicValue(RS_59_customer_c_customer_sk_min) AND DynamicValue(RS_59_customer_c_customer_sk_max) and in_bloom_filter(cs_bill_customer_sk, DynamicValue(RS_59_customer_c_customer_sk_bloom_filter))) and (cs_item_sk BETWEEN DynamicValue(RS_56_item_i_item_sk_min) AND DynamicValue(RS_56_item_i_item_sk_max) and in_bloom_filter(cs_item_sk, DynamicValue(RS_56_item_i_item_sk_bloom_filter))) and (cs_sold_date_sk BETWEEN DynamicValue(RS_53_date_dim_d_date_sk_min) AND DynamicValue(RS_53_date_dim_d_date_sk_max) and in_bloom_filter(cs_sold_date_sk, DynamicValue(RS_53_date_dim_d_date_sk_bloom_filter))) and cs_bill_customer_sk is not null and cs_item_sk is not null and cs_sold_date_sk is not null)
-                                                                                    TableScan [TS_272] (rows=287989836 width=135)
+                                                                                    TableScan [TS_274] (rows=287989836 width=135)
                                                                                       Output:["cs_sold_date_sk","cs_bill_customer_sk","cs_item_sk"]
                                                                                     <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                                                      BROADCAST [RS_360]
-                                                                                        Group By Operator [GBY_359] (rows=1 width=12)
+                                                                                      BROADCAST [RS_362]
+                                                                                        Group By Operator [GBY_361] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                                         <-Map 23 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                          PARTITION_ONLY_SHUFFLE [RS_304]
-                                                                                            Group By Operator [GBY_303] (rows=1 width=12)
+                                                                                          PARTITION_ONLY_SHUFFLE [RS_306]
+                                                                                            Group By Operator [GBY_305] (rows=1 width=12)
                                                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                              Select Operator [SEL_302] (rows=18262 width=1119)
+                                                                                              Select Operator [SEL_304] (rows=18262 width=1119)
                                                                                                 Output:["_col0"]
-                                                                                                 Please refer to the previous Select Operator [SEL_300]
+                                                                                                 Please refer to the previous Select Operator [SEL_302]
                                                                                     <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                                                      BROADCAST [RS_363]
-                                                                                        Group By Operator [GBY_362] (rows=1 width=12)
+                                                                                      BROADCAST [RS_365]
+                                                                                        Group By Operator [GBY_364] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                                         <-Map 25 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                          PARTITION_ONLY_SHUFFLE [RS_310]
-                                                                                            Group By Operator [GBY_309] (rows=1 width=12)
+                                                                                          PARTITION_ONLY_SHUFFLE [RS_312]
+                                                                                            Group By Operator [GBY_311] (rows=1 width=12)
                                                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                                              Select Operator [SEL_308] (rows=115500 width=1436)
+                                                                                              Select Operator [SEL_310] (rows=115500 width=1436)
                                                                                                 Output:["_col0"]
-                                                                                                 Please refer to the previous Select Operator [SEL_306]
+                                                                                                 Please refer to the previous Select Operator [SEL_308]
                                                                                     <-Reducer 28 [BROADCAST_EDGE] vectorized
-                                                                                      BROADCAST [RS_366]
-                                                                                        Group By Operator [GBY_365] (rows=1 width=12)
+                                                                                      BROADCAST [RS_368]
+                                                                                        Group By Operator [GBY_367] (rows=1 width=12)
                                                                                           Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=80000000)"]
                                                                                         <-Map 27 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                                          PARTITION_ONLY_SHUFFLE [RS_316]
-                                                                                            Group By Operator [GBY_315] (rows=1 width=12)
+                                                                                          PARTITION_ONLY_SHUFFLE [RS_318]
+                                                                                            Group By Operator [GBY_317] (rows=1 width=12)
                                                                                               Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=80000000)"]
-                                                                                              Select Operator [SEL_314] (rows=80000000 width=860)
+                                                                                              Select Operator [SEL_316] (rows=80000000 width=860)
                                                                                                 Output:["_col0"]
-                                                                                                 Please refer to the previous Select Operator [SEL_312]
+                                                                                                 Please refer to the previous Select Operator [SEL_314]
                                                                             <-Map 22 [CONTAINS] vectorized
-                                                                              Reduce Output Operator [RS_372]
+                                                                              Reduce Output Operator [RS_374]
                                                                                 PartitionCols:_col0
-                                                                                Select Operator [SEL_371] (rows=144002668 width=135)
+                                                                                Select Operator [SEL_373] (rows=144002668 width=135)
                                                                                   Output:["_col0","_col1","_col2"]
-                                                                                  Filter Operator [FIL_370] (rows=144002668 width=135)
+                                                                                  Filter Operator [FIL_372] (rows=144002668 width=135)
                                                                                     predicate:((ws_item_sk BETWEEN DynamicValue(RS_56_item_i_item_sk_min) AND DynamicValue(RS_56_item_i_item_sk_max) and in_bloom_filter(ws_item_sk, DynamicValue(RS_56_item_i_item_sk_bloom_filter))) and (ws_sold_date_sk BETWEEN DynamicValue(RS_53_date_dim_d_date_sk_min) AND DynamicValue(RS_53_date_dim_d_date_sk_max) and in_bloom_filter(ws_sold_date_sk, DynamicValue(RS_53_date_dim_d_date_sk_bloom_filter))) and ws_bill_customer_sk is not null and ws_item_sk is not null and ws_sold_date_sk is not null)
-                                                                                    TableScan [TS_277] (rows=144002668 width=135)
+                                                                                    TableScan [TS_279] (rows=144002668 width=135)
                                                                                       Output:["ws_sold_date_sk","ws_item_sk","ws_bill_customer_sk"]
                                                                                     <-Reducer 24 [BROADCAST_EDGE] vectorized
-                                                                                      BROADCAST [RS_361]
-                                                                                         Please refer to the previous Group By Operator [GBY_359]
+                                                                                      BROADCAST [RS_363]
+                                                                                         Please refer to the previous Group By Operator [GBY_361]
                                                                                     <-Reducer 26 [BROADCAST_EDGE] vectorized
-                                                                                      BROADCAST [RS_364]
-                                                                                         Please refer to the previous Group By Operator [GBY_362]
+                                                                                      BROADCAST [RS_366]
+                                                                                         Please refer to the previous Group By Operator [GBY_364]
                                                   <-Reducer 2 [SIMPLE_EDGE]
                                                     SHUFFLE [RS_99]
                                                       PartitionCols:_col1
-                                                      Merge Join Operator [MERGEJOIN_261] (rows=633595212 width=88)
-                                                        Conds:RS_292._col0=RS_284._col0(Inner),Output:["_col1","_col2","_col4"]
+                                                      Merge Join Operator [MERGEJOIN_263] (rows=633595212 width=88)
+                                                        Conds:RS_294._col0=RS_286._col0(Inner),Output:["_col1","_col2","_col4"]
                                                       <-Map 10 [SIMPLE_EDGE] vectorized
-                                                        SHUFFLE [RS_284]
+                                                        SHUFFLE [RS_286]
                                                           PartitionCols:_col0
-                                                          Select Operator [SEL_283] (rows=73049 width=1119)
+                                                          Select Operator [SEL_285] (rows=73049 width=1119)
                                                             Output:["_col0","_col1"]
-                                                            Filter Operator [FIL_282] (rows=73049 width=1119)
+                                                            Filter Operator [FIL_284] (rows=73049 width=1119)
                                                               predicate:d_date_sk is not null
                                                               TableScan [TS_26] (rows=73049 width=1119)
                                                                 default@date_dim,date_dim,Tbl:COMPLETE,Col:NONE,Output:["d_date_sk","d_month_seq"]
                                                       <-Map 1 [SIMPLE_EDGE] vectorized
-                                                        SHUFFLE [RS_292]
+                                                        SHUFFLE [RS_294]
                                                           PartitionCols:_col0
-                                                          Select Operator [SEL_291] (rows=575995635 width=88)
+                                                          Select Operator [SEL_293] (rows=575995635 width=88)
                                                             Output:["_col0","_col1","_col2"]
-                                                            Filter Operator [FIL_290] (rows=575995635 width=88)
+                                                            Filter Operator [FIL_292] (rows=575995635 width=88)
                                                               predicate:((ss_sold_date_sk BETWEEN DynamicValue(RS_97_date_dim_d_date_sk_min) AND DynamicValue(RS_97_date_dim_d_date_sk_max) and in_bloom_filter(ss_sold_date_sk, DynamicValue(RS_97_date_dim_d_date_sk_bloom_filter))) and ss_customer_sk is not null and ss_sold_date_sk is not null)
                                                               TableScan [TS_23] (rows=575995635 width=88)
                                                                 default@store_sales,store_sales,Tbl:COMPLETE,Col:NONE,Output:["ss_sold_date_sk","ss_customer_sk","ss_ext_sales_price"]
                                                               <-Reducer 11 [BROADCAST_EDGE] vectorized
-                                                                BROADCAST [RS_289]
-                                                                  Group By Operator [GBY_288] (rows=1 width=12)
+                                                                BROADCAST [RS_291]
+                                                                  Group By Operator [GBY_290] (rows=1 width=12)
                                                                     Output:["_col0","_col1","_col2"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","bloom_filter(VALUE._col2, expectedEntries=1000000)"]
                                                                   <-Map 10 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                                    SHUFFLE [RS_287]
-                                                                      Group By Operator [GBY_286] (rows=1 width=12)
+                                                                    SHUFFLE [RS_289]
+                                                                      Group By Operator [GBY_288] (rows=1 width=12)
                                                                         Output:["_col0","_col1","_col2"],aggregations:["min(_col0)","max(_col0)","bloom_filter(_col0, expectedEntries=1000000)"]
-                                                                        Select Operator [SEL_285] (rows=73049 width=1119)
+                                                                        Select Operator [SEL_287] (rows=73049 width=1119)
                                                                           Output:["_col0"]
-                                                                           Please refer to the previous Select Operator [SEL_283]
+                                                                           Please refer to the previous Select Operator [SEL_285]
                                               <-Reducer 31 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                PARTITION_ONLY_SHUFFLE [RS_338]
-                                                  Select Operator [SEL_337] (rows=1 width=8)
-                                                    Filter Operator [FIL_336] (rows=1 width=8)
+                                                PARTITION_ONLY_SHUFFLE [RS_340]
+                                                  Select Operator [SEL_339] (rows=1 width=8)
+                                                    Filter Operator [FIL_338] (rows=1 width=8)
                                                       predicate:(sq_count_check(_col0) <= 1)
-                                                      Group By Operator [GBY_335] (rows=1 width=8)
+                                                      Group By Operator [GBY_337] (rows=1 width=8)
                                                         Output:["_col0"],aggregations:["count(VALUE._col0)"]
                                                       <-Reducer 30 [CUSTOM_SIMPLE_EDGE] vectorized
-                                                        PARTITION_ONLY_SHUFFLE [RS_334]
-                                                          Group By Operator [GBY_333] (rows=1 width=8)
+                                                        PARTITION_ONLY_SHUFFLE [RS_336]
+                                                          Group By Operator [GBY_335] (rows=1 width=8)
                                                             Output:["_col0"],aggregations:["count()"]
-                                                            Select Operator [SEL_331] (rows=9131 width=1119)
-                                                               Please refer to the previous Group By Operator [GBY_330]
+                                                            Select Operator [SEL_333] (rows=9131 width=1119)
+                                                               Please refer to the previous Group By Operator [GBY_332]
 


[41/48] hive git commit: HIVE-20165: Enable ZLIB for streaming ingest (Prasanth Jayachandran, via Gopal V)

Posted by se...@apache.org.
HIVE-20165: Enable ZLIB for streaming ingest (Prasanth Jayachandran, via Gopal V)

Signed-off-by: Gopal V <go...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bac1d98c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bac1d98c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bac1d98c

Branch: refs/heads/master-txnstats
Commit: bac1d98c5e91cdb39567f21b2068e7951a93ef44
Parents: 4fcf3d7
Author: Prasanth Jayachandran <pr...@apache.org>
Authored: Tue Jul 17 20:17:55 2018 -0700
Committer: Gopal V <go...@apache.org>
Committed: Tue Jul 17 20:17:55 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java |  1 -
 .../apache/hive/streaming/TestStreaming.java    | 21 ++++++++++----------
 2 files changed, 10 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bac1d98c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
index 5590470..98f5df1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
@@ -342,7 +342,6 @@ public class OrcRecordUpdater implements RecordUpdater {
       writerOptions.blockPadding(false);
       if (optionsCloneForDelta.getConfiguration().getBoolean(
         HiveConf.ConfVars.HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED.varname, false)) {
-        writerOptions.compress(CompressionKind.NONE);
         writerOptions.encodingStrategy(org.apache.orc.OrcFile.EncodingStrategy.SPEED);
         writerOptions.rowIndexStride(0);
         writerOptions.getConfiguration().set(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), "-1.0");

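Note on the hunk above: with the forced CompressionKind.NONE removed, enabling
HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED now leaves the delta writer on the ORC
default codec (ZLIB) while still using SPEED encoding, no row indexes and no dictionary.
A minimal sketch of the resulting writer configuration follows; the class name, method
name and schema are illustrative assumptions, not part of the commit.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.orc.OrcConf;
    import org.apache.orc.OrcFile;
    import org.apache.orc.TypeDescription;
    import org.apache.orc.Writer;

    public class DeltaWriterOptionsSketch {
      // Opens an ORC writer configured the way the optimized streaming delta path now is:
      // default (ZLIB) compression, SPEED encoding, no row indexes, no dictionary encoding.
      public static Writer openDeltaWriter(Configuration conf, Path deltaPath) throws IOException {
        // Disable dictionary encoding via the ORC config key, as the delta path does.
        conf.set(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), "-1.0");
        TypeDescription schema = TypeDescription.fromString("struct<id:bigint,msg:string>"); // hypothetical schema
        OrcFile.WriterOptions opts = OrcFile.writerOptions(conf)
            .setSchema(schema)
            .blockPadding(false)
            // compression is no longer overridden here, so the ORC default (ZLIB) applies
            .encodingStrategy(OrcFile.EncodingStrategy.SPEED)
            .rowIndexStride(0);
        return OrcFile.createWriter(deltaPath, opts);
      }
    }
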
http://git-wip-us.apache.org/repos/asf/hive/blob/bac1d98c/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
----------------------------------------------------------------------
diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
index 1f05d88..8b5e508 100644
--- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
+++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
@@ -2072,20 +2072,19 @@ public class TestStreaming {
     System.setOut(origOut);
 
     String outDump = new String(myOut.toByteArray());
-    // make sure delta files are written with no indexes, no compression and no dictionary
-    // no compression
-    Assert.assertEquals(true, outDump.contains("Compression: NONE"));
+    // make sure delta files are written with no indexes and no dictionary
+    Assert.assertEquals(true, outDump.contains("Compression: ZLIB"));
     // no stats/indexes
     Assert.assertEquals(true, outDump.contains("Column 0: count: 0 hasNull: false"));
-    Assert.assertEquals(true, outDump.contains("Column 1: count: 0 hasNull: false bytesOnDisk: 12 sum: 0"));
-    Assert.assertEquals(true, outDump.contains("Column 2: count: 0 hasNull: false bytesOnDisk: 12 sum: 0"));
-    Assert.assertEquals(true, outDump.contains("Column 3: count: 0 hasNull: false bytesOnDisk: 24 sum: 0"));
-    Assert.assertEquals(true, outDump.contains("Column 4: count: 0 hasNull: false bytesOnDisk: 14 sum: 0"));
-    Assert.assertEquals(true, outDump.contains("Column 5: count: 0 hasNull: false bytesOnDisk: 12 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 1: count: 0 hasNull: false bytesOnDisk: 15 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 2: count: 0 hasNull: false bytesOnDisk: 15 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 3: count: 0 hasNull: false bytesOnDisk: 19 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 4: count: 0 hasNull: false bytesOnDisk: 17 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 5: count: 0 hasNull: false bytesOnDisk: 15 sum: 0"));
     Assert.assertEquals(true, outDump.contains("Column 6: count: 0 hasNull: false"));
-    Assert.assertEquals(true, outDump.contains("Column 7: count: 0 hasNull: false bytesOnDisk: 11864"));
-    Assert.assertEquals(true, outDump.contains("Column 8: count: 0 hasNull: false bytesOnDisk: 2033 sum: 0"));
-    Assert.assertEquals(true, outDump.contains("Column 9: count: 0 hasNull: false bytesOnDisk: 13629"));
+    Assert.assertEquals(true, outDump.contains("Column 7: count: 0 hasNull: false bytesOnDisk: 3929"));
+    Assert.assertEquals(true, outDump.contains("Column 8: count: 0 hasNull: false bytesOnDisk: 1484 sum: 0"));
+    Assert.assertEquals(true, outDump.contains("Column 9: count: 0 hasNull: false bytesOnDisk: 816"));
     // no dictionary
     Assert.assertEquals(true, outDump.contains("Encoding column 7: DIRECT_V2"));
     Assert.assertEquals(true, outDump.contains("Encoding column 9: DIRECT_V2"));

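The test changes above only update the expected file-dump output: with ZLIB in effect the
dump of a streaming delta file is now expected to contain "Compression: ZLIB" instead of
"Compression: NONE", along with the corresponding bytesOnDisk values. The same property can
be checked by hand with the ORC file dump utility, e.g. `hive --orcfiledump <path-to-delta-bucket-file>`;
the exact delta path is installation-specific and shown here only as an illustration.
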

[43/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index f4c5002,2ae6d9a..913bbf0
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@@ -16400,10 -16045,10 +16400,10 @@@ class get_databases_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype854, _size851) = iprot.readListBegin()
-           for _i855 in xrange(_size851):
-             _elem856 = iprot.readString()
-             self.success.append(_elem856)
 -          (_etype833, _size830) = iprot.readListBegin()
 -          for _i834 in xrange(_size830):
 -            _elem835 = iprot.readString()
 -            self.success.append(_elem835)
++          (_etype847, _size844) = iprot.readListBegin()
++          for _i848 in xrange(_size844):
++            _elem849 = iprot.readString()
++            self.success.append(_elem849)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -16426,8 -16071,8 +16426,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter857 in self.success:
-         oprot.writeString(iter857)
 -      for iter836 in self.success:
 -        oprot.writeString(iter836)
++      for iter850 in self.success:
++        oprot.writeString(iter850)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -16532,10 -16177,10 +16532,10 @@@ class get_all_databases_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype861, _size858) = iprot.readListBegin()
-           for _i862 in xrange(_size858):
-             _elem863 = iprot.readString()
-             self.success.append(_elem863)
 -          (_etype840, _size837) = iprot.readListBegin()
 -          for _i841 in xrange(_size837):
 -            _elem842 = iprot.readString()
 -            self.success.append(_elem842)
++          (_etype854, _size851) = iprot.readListBegin()
++          for _i855 in xrange(_size851):
++            _elem856 = iprot.readString()
++            self.success.append(_elem856)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -16558,8 -16203,8 +16558,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter864 in self.success:
-         oprot.writeString(iter864)
 -      for iter843 in self.success:
 -        oprot.writeString(iter843)
++      for iter857 in self.success:
++        oprot.writeString(iter857)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -17329,12 -16974,12 +17329,12 @@@ class get_type_all_result
        if fid == 0:
          if ftype == TType.MAP:
            self.success = {}
-           (_ktype866, _vtype867, _size865 ) = iprot.readMapBegin()
-           for _i869 in xrange(_size865):
-             _key870 = iprot.readString()
-             _val871 = Type()
-             _val871.read(iprot)
-             self.success[_key870] = _val871
 -          (_ktype845, _vtype846, _size844 ) = iprot.readMapBegin()
 -          for _i848 in xrange(_size844):
 -            _key849 = iprot.readString()
 -            _val850 = Type()
 -            _val850.read(iprot)
 -            self.success[_key849] = _val850
++          (_ktype859, _vtype860, _size858 ) = iprot.readMapBegin()
++          for _i862 in xrange(_size858):
++            _key863 = iprot.readString()
++            _val864 = Type()
++            _val864.read(iprot)
++            self.success[_key863] = _val864
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -17357,9 -17002,9 +17357,9 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.MAP, 0)
        oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
-       for kiter872,viter873 in self.success.items():
-         oprot.writeString(kiter872)
-         viter873.write(oprot)
 -      for kiter851,viter852 in self.success.items():
 -        oprot.writeString(kiter851)
 -        viter852.write(oprot)
++      for kiter865,viter866 in self.success.items():
++        oprot.writeString(kiter865)
++        viter866.write(oprot)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.o2 is not None:
@@@ -17502,11 -17147,11 +17502,11 @@@ class get_fields_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype877, _size874) = iprot.readListBegin()
-           for _i878 in xrange(_size874):
-             _elem879 = FieldSchema()
-             _elem879.read(iprot)
-             self.success.append(_elem879)
 -          (_etype856, _size853) = iprot.readListBegin()
 -          for _i857 in xrange(_size853):
 -            _elem858 = FieldSchema()
 -            _elem858.read(iprot)
 -            self.success.append(_elem858)
++          (_etype870, _size867) = iprot.readListBegin()
++          for _i871 in xrange(_size867):
++            _elem872 = FieldSchema()
++            _elem872.read(iprot)
++            self.success.append(_elem872)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17541,8 -17186,8 +17541,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter880 in self.success:
-         iter880.write(oprot)
 -      for iter859 in self.success:
 -        iter859.write(oprot)
++      for iter873 in self.success:
++        iter873.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -17709,11 -17354,11 +17709,11 @@@ class get_fields_with_environment_conte
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype884, _size881) = iprot.readListBegin()
-           for _i885 in xrange(_size881):
-             _elem886 = FieldSchema()
-             _elem886.read(iprot)
-             self.success.append(_elem886)
 -          (_etype863, _size860) = iprot.readListBegin()
 -          for _i864 in xrange(_size860):
 -            _elem865 = FieldSchema()
 -            _elem865.read(iprot)
 -            self.success.append(_elem865)
++          (_etype877, _size874) = iprot.readListBegin()
++          for _i878 in xrange(_size874):
++            _elem879 = FieldSchema()
++            _elem879.read(iprot)
++            self.success.append(_elem879)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17748,8 -17393,8 +17748,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter887 in self.success:
-         iter887.write(oprot)
 -      for iter866 in self.success:
 -        iter866.write(oprot)
++      for iter880 in self.success:
++        iter880.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -17902,11 -17547,11 +17902,11 @@@ class get_schema_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype891, _size888) = iprot.readListBegin()
-           for _i892 in xrange(_size888):
-             _elem893 = FieldSchema()
-             _elem893.read(iprot)
-             self.success.append(_elem893)
 -          (_etype870, _size867) = iprot.readListBegin()
 -          for _i871 in xrange(_size867):
 -            _elem872 = FieldSchema()
 -            _elem872.read(iprot)
 -            self.success.append(_elem872)
++          (_etype884, _size881) = iprot.readListBegin()
++          for _i885 in xrange(_size881):
++            _elem886 = FieldSchema()
++            _elem886.read(iprot)
++            self.success.append(_elem886)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -17941,8 -17586,8 +17941,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter894 in self.success:
-         iter894.write(oprot)
 -      for iter873 in self.success:
 -        iter873.write(oprot)
++      for iter887 in self.success:
++        iter887.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -18109,11 -17754,11 +18109,11 @@@ class get_schema_with_environment_conte
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype898, _size895) = iprot.readListBegin()
-           for _i899 in xrange(_size895):
-             _elem900 = FieldSchema()
-             _elem900.read(iprot)
-             self.success.append(_elem900)
 -          (_etype877, _size874) = iprot.readListBegin()
 -          for _i878 in xrange(_size874):
 -            _elem879 = FieldSchema()
 -            _elem879.read(iprot)
 -            self.success.append(_elem879)
++          (_etype891, _size888) = iprot.readListBegin()
++          for _i892 in xrange(_size888):
++            _elem893 = FieldSchema()
++            _elem893.read(iprot)
++            self.success.append(_elem893)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -18148,8 -17793,8 +18148,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter901 in self.success:
-         iter901.write(oprot)
 -      for iter880 in self.success:
 -        iter880.write(oprot)
++      for iter894 in self.success:
++        iter894.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -18602,66 -18247,66 +18602,66 @@@ class create_table_with_constraints_arg
        elif fid == 2:
          if ftype == TType.LIST:
            self.primaryKeys = []
-           (_etype905, _size902) = iprot.readListBegin()
-           for _i906 in xrange(_size902):
-             _elem907 = SQLPrimaryKey()
-             _elem907.read(iprot)
-             self.primaryKeys.append(_elem907)
 -          (_etype884, _size881) = iprot.readListBegin()
 -          for _i885 in xrange(_size881):
 -            _elem886 = SQLPrimaryKey()
 -            _elem886.read(iprot)
 -            self.primaryKeys.append(_elem886)
++          (_etype898, _size895) = iprot.readListBegin()
++          for _i899 in xrange(_size895):
++            _elem900 = SQLPrimaryKey()
++            _elem900.read(iprot)
++            self.primaryKeys.append(_elem900)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 3:
          if ftype == TType.LIST:
            self.foreignKeys = []
-           (_etype911, _size908) = iprot.readListBegin()
-           for _i912 in xrange(_size908):
-             _elem913 = SQLForeignKey()
-             _elem913.read(iprot)
-             self.foreignKeys.append(_elem913)
 -          (_etype890, _size887) = iprot.readListBegin()
 -          for _i891 in xrange(_size887):
 -            _elem892 = SQLForeignKey()
 -            _elem892.read(iprot)
 -            self.foreignKeys.append(_elem892)
++          (_etype904, _size901) = iprot.readListBegin()
++          for _i905 in xrange(_size901):
++            _elem906 = SQLForeignKey()
++            _elem906.read(iprot)
++            self.foreignKeys.append(_elem906)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 4:
          if ftype == TType.LIST:
            self.uniqueConstraints = []
-           (_etype917, _size914) = iprot.readListBegin()
-           for _i918 in xrange(_size914):
-             _elem919 = SQLUniqueConstraint()
-             _elem919.read(iprot)
-             self.uniqueConstraints.append(_elem919)
 -          (_etype896, _size893) = iprot.readListBegin()
 -          for _i897 in xrange(_size893):
 -            _elem898 = SQLUniqueConstraint()
 -            _elem898.read(iprot)
 -            self.uniqueConstraints.append(_elem898)
++          (_etype910, _size907) = iprot.readListBegin()
++          for _i911 in xrange(_size907):
++            _elem912 = SQLUniqueConstraint()
++            _elem912.read(iprot)
++            self.uniqueConstraints.append(_elem912)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 5:
          if ftype == TType.LIST:
            self.notNullConstraints = []
-           (_etype923, _size920) = iprot.readListBegin()
-           for _i924 in xrange(_size920):
-             _elem925 = SQLNotNullConstraint()
-             _elem925.read(iprot)
-             self.notNullConstraints.append(_elem925)
 -          (_etype902, _size899) = iprot.readListBegin()
 -          for _i903 in xrange(_size899):
 -            _elem904 = SQLNotNullConstraint()
 -            _elem904.read(iprot)
 -            self.notNullConstraints.append(_elem904)
++          (_etype916, _size913) = iprot.readListBegin()
++          for _i917 in xrange(_size913):
++            _elem918 = SQLNotNullConstraint()
++            _elem918.read(iprot)
++            self.notNullConstraints.append(_elem918)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 6:
          if ftype == TType.LIST:
            self.defaultConstraints = []
-           (_etype929, _size926) = iprot.readListBegin()
-           for _i930 in xrange(_size926):
-             _elem931 = SQLDefaultConstraint()
-             _elem931.read(iprot)
-             self.defaultConstraints.append(_elem931)
 -          (_etype908, _size905) = iprot.readListBegin()
 -          for _i909 in xrange(_size905):
 -            _elem910 = SQLDefaultConstraint()
 -            _elem910.read(iprot)
 -            self.defaultConstraints.append(_elem910)
++          (_etype922, _size919) = iprot.readListBegin()
++          for _i923 in xrange(_size919):
++            _elem924 = SQLDefaultConstraint()
++            _elem924.read(iprot)
++            self.defaultConstraints.append(_elem924)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 7:
          if ftype == TType.LIST:
            self.checkConstraints = []
-           (_etype935, _size932) = iprot.readListBegin()
-           for _i936 in xrange(_size932):
-             _elem937 = SQLCheckConstraint()
-             _elem937.read(iprot)
-             self.checkConstraints.append(_elem937)
 -          (_etype914, _size911) = iprot.readListBegin()
 -          for _i915 in xrange(_size911):
 -            _elem916 = SQLCheckConstraint()
 -            _elem916.read(iprot)
 -            self.checkConstraints.append(_elem916)
++          (_etype928, _size925) = iprot.readListBegin()
++          for _i929 in xrange(_size925):
++            _elem930 = SQLCheckConstraint()
++            _elem930.read(iprot)
++            self.checkConstraints.append(_elem930)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -18682,43 -18327,43 +18682,43 @@@
      if self.primaryKeys is not None:
        oprot.writeFieldBegin('primaryKeys', TType.LIST, 2)
        oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys))
-       for iter938 in self.primaryKeys:
-         iter938.write(oprot)
 -      for iter917 in self.primaryKeys:
 -        iter917.write(oprot)
++      for iter931 in self.primaryKeys:
++        iter931.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.foreignKeys is not None:
        oprot.writeFieldBegin('foreignKeys', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys))
-       for iter939 in self.foreignKeys:
-         iter939.write(oprot)
 -      for iter918 in self.foreignKeys:
 -        iter918.write(oprot)
++      for iter932 in self.foreignKeys:
++        iter932.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.uniqueConstraints is not None:
        oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4)
        oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints))
-       for iter940 in self.uniqueConstraints:
-         iter940.write(oprot)
 -      for iter919 in self.uniqueConstraints:
 -        iter919.write(oprot)
++      for iter933 in self.uniqueConstraints:
++        iter933.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.notNullConstraints is not None:
        oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5)
        oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints))
-       for iter941 in self.notNullConstraints:
-         iter941.write(oprot)
 -      for iter920 in self.notNullConstraints:
 -        iter920.write(oprot)
++      for iter934 in self.notNullConstraints:
++        iter934.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.defaultConstraints is not None:
        oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6)
        oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints))
-       for iter942 in self.defaultConstraints:
-         iter942.write(oprot)
 -      for iter921 in self.defaultConstraints:
 -        iter921.write(oprot)
++      for iter935 in self.defaultConstraints:
++        iter935.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.checkConstraints is not None:
        oprot.writeFieldBegin('checkConstraints', TType.LIST, 7)
        oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints))
-       for iter943 in self.checkConstraints:
-         iter943.write(oprot)
 -      for iter922 in self.checkConstraints:
 -        iter922.write(oprot)
++      for iter936 in self.checkConstraints:
++        iter936.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -20278,10 -19923,10 +20278,10 @@@ class truncate_table_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.partNames = []
-           (_etype947, _size944) = iprot.readListBegin()
-           for _i948 in xrange(_size944):
-             _elem949 = iprot.readString()
-             self.partNames.append(_elem949)
 -          (_etype926, _size923) = iprot.readListBegin()
 -          for _i927 in xrange(_size923):
 -            _elem928 = iprot.readString()
 -            self.partNames.append(_elem928)
++          (_etype940, _size937) = iprot.readListBegin()
++          for _i941 in xrange(_size937):
++            _elem942 = iprot.readString()
++            self.partNames.append(_elem942)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20306,8 -19951,8 +20306,8 @@@
      if self.partNames is not None:
        oprot.writeFieldBegin('partNames', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.partNames))
-       for iter950 in self.partNames:
-         oprot.writeString(iter950)
 -      for iter929 in self.partNames:
 -        oprot.writeString(iter929)
++      for iter943 in self.partNames:
++        oprot.writeString(iter943)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -20829,10 -20665,11 +20829,10 @@@ class get_tables_by_type_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype961, _size958) = iprot.readListBegin()
--          for _i962 in xrange(_size958):
-             _elem963 = iprot.readString()
 -            _elem963 = TableMeta()
 -            _elem963.read(iprot)
--            self.success.append(_elem963)
++          (_etype954, _size951) = iprot.readListBegin()
++          for _i955 in xrange(_size951):
++            _elem956 = iprot.readString()
++            self.success.append(_elem956)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -20851,12 -20688,12 +20851,12 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('get_table_meta_result')
 +    oprot.writeStructBegin('get_tables_by_type_result')
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
 -      oprot.writeListBegin(TType.STRUCT, len(self.success))
 -      for iter964 in self.success:
 -        iter964.write(oprot)
 +      oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter964 in self.success:
-         oprot.writeString(iter964)
++      for iter957 in self.success:
++        oprot.writeString(iter957)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -20980,10 -20817,10 +20980,10 @@@ class get_materialized_views_for_rewrit
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
--          (_etype968, _size965) = iprot.readListBegin()
--          for _i969 in xrange(_size965):
--            _elem970 = iprot.readString()
--            self.success.append(_elem970)
++          (_etype961, _size958) = iprot.readListBegin()
++          for _i962 in xrange(_size958):
++            _elem963 = iprot.readString()
++            self.success.append(_elem963)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21006,8 -20843,8 +21006,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
--      for iter971 in self.success:
--        oprot.writeString(iter971)
++      for iter964 in self.success:
++        oprot.writeString(iter964)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -21074,17 -20908,7 +21074,17 @@@ class get_table_meta_args
            iprot.skip(ftype)
        elif fid == 2:
          if ftype == TType.STRING:
 -          self.tbl_name = iprot.readString()
 +          self.tbl_patterns = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 3:
 +        if ftype == TType.LIST:
 +          self.tbl_types = []
-           (_etype975, _size972) = iprot.readListBegin()
-           for _i976 in xrange(_size972):
-             _elem977 = iprot.readString()
-             self.tbl_types.append(_elem977)
++          (_etype968, _size965) = iprot.readListBegin()
++          for _i969 in xrange(_size965):
++            _elem970 = iprot.readString()
++            self.tbl_types.append(_elem970)
 +          iprot.readListEnd()
          else:
            iprot.skip(ftype)
        else:
@@@ -21096,21 -20920,14 +21096,21 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('get_table_args')
 -    if self.dbname is not None:
 -      oprot.writeFieldBegin('dbname', TType.STRING, 1)
 -      oprot.writeString(self.dbname)
 +    oprot.writeStructBegin('get_table_meta_args')
 +    if self.db_patterns is not None:
 +      oprot.writeFieldBegin('db_patterns', TType.STRING, 1)
 +      oprot.writeString(self.db_patterns)
        oprot.writeFieldEnd()
 -    if self.tbl_name is not None:
 -      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
 -      oprot.writeString(self.tbl_name)
 +    if self.tbl_patterns is not None:
 +      oprot.writeFieldBegin('tbl_patterns', TType.STRING, 2)
 +      oprot.writeString(self.tbl_patterns)
 +      oprot.writeFieldEnd()
 +    if self.tbl_types is not None:
 +      oprot.writeFieldBegin('tbl_types', TType.LIST, 3)
 +      oprot.writeListBegin(TType.STRING, len(self.tbl_types))
-       for iter978 in self.tbl_types:
-         oprot.writeString(iter978)
++      for iter971 in self.tbl_types:
++        oprot.writeString(iter971)
 +      oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()
@@@ -21163,14 -20982,9 +21163,14 @@@ class get_table_meta_result
        if ftype == TType.STOP:
          break
        if fid == 0:
 -        if ftype == TType.STRUCT:
 -          self.success = Table()
 -          self.success.read(iprot)
 +        if ftype == TType.LIST:
 +          self.success = []
-           (_etype982, _size979) = iprot.readListBegin()
-           for _i983 in xrange(_size979):
-             _elem984 = TableMeta()
-             _elem984.read(iprot)
-             self.success.append(_elem984)
++          (_etype975, _size972) = iprot.readListBegin()
++          for _i976 in xrange(_size972):
++            _elem977 = TableMeta()
++            _elem977.read(iprot)
++            self.success.append(_elem977)
 +          iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 1:
@@@ -21188,13 -21008,10 +21188,13 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('get_table_result')
 +    oprot.writeStructBegin('get_table_meta_result')
      if self.success is not None:
 -      oprot.writeFieldBegin('success', TType.STRUCT, 0)
 -      self.success.write(oprot)
 +      oprot.writeFieldBegin('success', TType.LIST, 0)
 +      oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter985 in self.success:
-         iter985.write(oprot)
++      for iter978 in self.success:
++        iter978.write(oprot)
 +      oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
        oprot.writeFieldBegin('o1', TType.STRUCT, 1)
@@@ -21317,10 -21157,11 +21317,10 @@@ class get_all_tables_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype989, _size986) = iprot.readListBegin()
-           for _i990 in xrange(_size986):
-             _elem991 = iprot.readString()
-             self.success.append(_elem991)
+           (_etype982, _size979) = iprot.readListBegin()
+           for _i983 in xrange(_size979):
 -            _elem984 = Table()
 -            _elem984.read(iprot)
++            _elem984 = iprot.readString()
+             self.success.append(_elem984)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -21339,18 -21174,14 +21339,18 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('get_table_objects_by_name_result')
 +    oprot.writeStructBegin('get_all_tables_result')
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
 -      oprot.writeListBegin(TType.STRUCT, len(self.success))
 +      oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter992 in self.success:
-         oprot.writeString(iter992)
+       for iter985 in self.success:
 -        iter985.write(oprot)
++        oprot.writeString(iter985)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
 +    if self.o1 is not None:
 +      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
 +      self.o1.write(oprot)
 +      oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()
  
@@@ -21483,325 -21301,7 +21483,325 @@@ class get_table_result
          break
        if fid == 0:
          if ftype == TType.STRUCT:
 -          self.success = GetTableResult()
 +          self.success = Table()
 +          self.success.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 1:
 +        if ftype == TType.STRUCT:
 +          self.o1 = MetaException()
 +          self.o1.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 2:
 +        if ftype == TType.STRUCT:
 +          self.o2 = NoSuchObjectException()
 +          self.o2.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('get_table_result')
 +    if self.success is not None:
 +      oprot.writeFieldBegin('success', TType.STRUCT, 0)
 +      self.success.write(oprot)
 +      oprot.writeFieldEnd()
 +    if self.o1 is not None:
 +      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
 +      self.o1.write(oprot)
 +      oprot.writeFieldEnd()
 +    if self.o2 is not None:
 +      oprot.writeFieldBegin('o2', TType.STRUCT, 2)
 +      self.o2.write(oprot)
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.success)
 +    value = (value * 31) ^ hash(self.o1)
 +    value = (value * 31) ^ hash(self.o2)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class get_table_objects_by_name_args:
 +  """
 +  Attributes:
 +   - dbname
 +   - tbl_names
 +  """
 +
 +  thrift_spec = (
 +    None, # 0
 +    (1, TType.STRING, 'dbname', None, None, ), # 1
 +    (2, TType.LIST, 'tbl_names', (TType.STRING,None), None, ), # 2
 +  )
 +
 +  def __init__(self, dbname=None, tbl_names=None,):
 +    self.dbname = dbname
 +    self.tbl_names = tbl_names
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 1:
 +        if ftype == TType.STRING:
 +          self.dbname = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 2:
 +        if ftype == TType.LIST:
 +          self.tbl_names = []
-           (_etype996, _size993) = iprot.readListBegin()
-           for _i997 in xrange(_size993):
-             _elem998 = iprot.readString()
-             self.tbl_names.append(_elem998)
++          (_etype989, _size986) = iprot.readListBegin()
++          for _i990 in xrange(_size986):
++            _elem991 = iprot.readString()
++            self.tbl_names.append(_elem991)
 +          iprot.readListEnd()
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('get_table_objects_by_name_args')
 +    if self.dbname is not None:
 +      oprot.writeFieldBegin('dbname', TType.STRING, 1)
 +      oprot.writeString(self.dbname)
 +      oprot.writeFieldEnd()
 +    if self.tbl_names is not None:
 +      oprot.writeFieldBegin('tbl_names', TType.LIST, 2)
 +      oprot.writeListBegin(TType.STRING, len(self.tbl_names))
-       for iter999 in self.tbl_names:
-         oprot.writeString(iter999)
++      for iter992 in self.tbl_names:
++        oprot.writeString(iter992)
 +      oprot.writeListEnd()
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.dbname)
 +    value = (value * 31) ^ hash(self.tbl_names)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class get_table_objects_by_name_result:
 +  """
 +  Attributes:
 +   - success
 +  """
 +
 +  thrift_spec = (
 +    (0, TType.LIST, 'success', (TType.STRUCT,(Table, Table.thrift_spec)), None, ), # 0
 +  )
 +
 +  def __init__(self, success=None,):
 +    self.success = success
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 0:
 +        if ftype == TType.LIST:
 +          self.success = []
-           (_etype1003, _size1000) = iprot.readListBegin()
-           for _i1004 in xrange(_size1000):
-             _elem1005 = Table()
-             _elem1005.read(iprot)
-             self.success.append(_elem1005)
++          (_etype996, _size993) = iprot.readListBegin()
++          for _i997 in xrange(_size993):
++            _elem998 = Table()
++            _elem998.read(iprot)
++            self.success.append(_elem998)
 +          iprot.readListEnd()
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('get_table_objects_by_name_result')
 +    if self.success is not None:
 +      oprot.writeFieldBegin('success', TType.LIST, 0)
 +      oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1006 in self.success:
-         iter1006.write(oprot)
++      for iter999 in self.success:
++        iter999.write(oprot)
 +      oprot.writeListEnd()
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.success)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class get_table_req_args:
 +  """
 +  Attributes:
 +   - req
 +  """
 +
 +  thrift_spec = (
 +    None, # 0
 +    (1, TType.STRUCT, 'req', (GetTableRequest, GetTableRequest.thrift_spec), None, ), # 1
 +  )
 +
 +  def __init__(self, req=None,):
 +    self.req = req
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 1:
 +        if ftype == TType.STRUCT:
 +          self.req = GetTableRequest()
 +          self.req.read(iprot)
 +        else:
 +          iprot.skip(ftype)
 +      else:
 +        iprot.skip(ftype)
 +      iprot.readFieldEnd()
 +    iprot.readStructEnd()
 +
 +  def write(self, oprot):
 +    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
 +      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
 +      return
 +    oprot.writeStructBegin('get_table_req_args')
 +    if self.req is not None:
 +      oprot.writeFieldBegin('req', TType.STRUCT, 1)
 +      self.req.write(oprot)
 +      oprot.writeFieldEnd()
 +    oprot.writeFieldStop()
 +    oprot.writeStructEnd()
 +
 +  def validate(self):
 +    return
 +
 +
 +  def __hash__(self):
 +    value = 17
 +    value = (value * 31) ^ hash(self.req)
 +    return value
 +
 +  def __repr__(self):
 +    L = ['%s=%r' % (key, value)
 +      for key, value in self.__dict__.iteritems()]
 +    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
 +
 +  def __eq__(self, other):
 +    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
 +
 +  def __ne__(self, other):
 +    return not (self == other)
 +
 +class get_table_req_result:
 +  """
 +  Attributes:
 +   - success
 +   - o1
 +   - o2
 +  """
 +
 +  thrift_spec = (
 +    (0, TType.STRUCT, 'success', (GetTableResult, GetTableResult.thrift_spec), None, ), # 0
 +    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
 +    (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2
 +  )
 +
 +  def __init__(self, success=None, o1=None, o2=None,):
 +    self.success = success
 +    self.o1 = o1
 +    self.o2 = o2
 +
 +  def read(self, iprot):
 +    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
 +      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
 +      return
 +    iprot.readStructBegin()
 +    while True:
 +      (fname, ftype, fid) = iprot.readFieldBegin()
 +      if ftype == TType.STOP:
 +        break
 +      if fid == 0:
 +        if ftype == TType.STRUCT:
 +          self.success = GetTableResult()
            self.success.read(iprot)
          else:
            iprot.skip(ftype)
@@@ -22564,10 -22047,10 +22547,10 @@@ class get_table_names_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1026, _size1023) = iprot.readListBegin()
-           for _i1027 in xrange(_size1023):
-             _elem1028 = iprot.readString()
-             self.success.append(_elem1028)
 -          (_etype989, _size986) = iprot.readListBegin()
 -          for _i990 in xrange(_size986):
 -            _elem991 = iprot.readString()
 -            self.success.append(_elem991)
++          (_etype1003, _size1000) = iprot.readListBegin()
++          for _i1004 in xrange(_size1000):
++            _elem1005 = iprot.readString()
++            self.success.append(_elem1005)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -22602,8 -22085,8 +22585,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter1029 in self.success:
-         oprot.writeString(iter1029)
 -      for iter992 in self.success:
 -        oprot.writeString(iter992)
++      for iter1006 in self.success:
++        oprot.writeString(iter1006)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -23732,11 -23056,11 +23715,11 @@@ class add_partitions_args
        if fid == 1:
          if ftype == TType.LIST:
            self.new_parts = []
-           (_etype1033, _size1030) = iprot.readListBegin()
-           for _i1034 in xrange(_size1030):
-             _elem1035 = Partition()
-             _elem1035.read(iprot)
-             self.new_parts.append(_elem1035)
 -          (_etype996, _size993) = iprot.readListBegin()
 -          for _i997 in xrange(_size993):
 -            _elem998 = Partition()
 -            _elem998.read(iprot)
 -            self.new_parts.append(_elem998)
++          (_etype1010, _size1007) = iprot.readListBegin()
++          for _i1011 in xrange(_size1007):
++            _elem1012 = Partition()
++            _elem1012.read(iprot)
++            self.new_parts.append(_elem1012)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23753,8 -23077,8 +23736,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-       for iter1036 in self.new_parts:
-         iter1036.write(oprot)
 -      for iter999 in self.new_parts:
 -        iter999.write(oprot)
++      for iter1013 in self.new_parts:
++        iter1013.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -23912,11 -23236,11 +23895,11 @@@ class add_partitions_pspec_args
        if fid == 1:
          if ftype == TType.LIST:
            self.new_parts = []
-           (_etype1040, _size1037) = iprot.readListBegin()
-           for _i1041 in xrange(_size1037):
-             _elem1042 = PartitionSpec()
-             _elem1042.read(iprot)
-             self.new_parts.append(_elem1042)
 -          (_etype1003, _size1000) = iprot.readListBegin()
 -          for _i1004 in xrange(_size1000):
 -            _elem1005 = PartitionSpec()
 -            _elem1005.read(iprot)
 -            self.new_parts.append(_elem1005)
++          (_etype1017, _size1014) = iprot.readListBegin()
++          for _i1018 in xrange(_size1014):
++            _elem1019 = PartitionSpec()
++            _elem1019.read(iprot)
++            self.new_parts.append(_elem1019)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -23933,8 -23257,8 +23916,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 1)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-       for iter1043 in self.new_parts:
-         iter1043.write(oprot)
 -      for iter1006 in self.new_parts:
 -        iter1006.write(oprot)
++      for iter1020 in self.new_parts:
++        iter1020.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -24108,10 -23432,10 +24091,10 @@@ class append_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1047, _size1044) = iprot.readListBegin()
-           for _i1048 in xrange(_size1044):
-             _elem1049 = iprot.readString()
-             self.part_vals.append(_elem1049)
 -          (_etype1010, _size1007) = iprot.readListBegin()
 -          for _i1011 in xrange(_size1007):
 -            _elem1012 = iprot.readString()
 -            self.part_vals.append(_elem1012)
++          (_etype1024, _size1021) = iprot.readListBegin()
++          for _i1025 in xrange(_size1021):
++            _elem1026 = iprot.readString()
++            self.part_vals.append(_elem1026)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -24136,8 -23460,8 +24119,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1050 in self.part_vals:
-         oprot.writeString(iter1050)
 -      for iter1013 in self.part_vals:
 -        oprot.writeString(iter1013)
++      for iter1027 in self.part_vals:
++        oprot.writeString(iter1027)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -24490,10 -23814,10 +24473,10 @@@ class append_partition_with_environment
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1054, _size1051) = iprot.readListBegin()
-           for _i1055 in xrange(_size1051):
-             _elem1056 = iprot.readString()
-             self.part_vals.append(_elem1056)
 -          (_etype1017, _size1014) = iprot.readListBegin()
 -          for _i1018 in xrange(_size1014):
 -            _elem1019 = iprot.readString()
 -            self.part_vals.append(_elem1019)
++          (_etype1031, _size1028) = iprot.readListBegin()
++          for _i1032 in xrange(_size1028):
++            _elem1033 = iprot.readString()
++            self.part_vals.append(_elem1033)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -24524,8 -23848,8 +24507,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1057 in self.part_vals:
-         oprot.writeString(iter1057)
 -      for iter1020 in self.part_vals:
 -        oprot.writeString(iter1020)
++      for iter1034 in self.part_vals:
++        oprot.writeString(iter1034)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.environment_context is not None:
@@@ -25120,10 -24444,10 +25103,10 @@@ class drop_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1061, _size1058) = iprot.readListBegin()
-           for _i1062 in xrange(_size1058):
-             _elem1063 = iprot.readString()
-             self.part_vals.append(_elem1063)
 -          (_etype1024, _size1021) = iprot.readListBegin()
 -          for _i1025 in xrange(_size1021):
 -            _elem1026 = iprot.readString()
 -            self.part_vals.append(_elem1026)
++          (_etype1038, _size1035) = iprot.readListBegin()
++          for _i1039 in xrange(_size1035):
++            _elem1040 = iprot.readString()
++            self.part_vals.append(_elem1040)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -25153,8 -24477,8 +25136,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1064 in self.part_vals:
-         oprot.writeString(iter1064)
 -      for iter1027 in self.part_vals:
 -        oprot.writeString(iter1027)
++      for iter1041 in self.part_vals:
++        oprot.writeString(iter1041)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.deleteData is not None:
@@@ -25327,10 -24651,10 +25310,10 @@@ class drop_partition_with_environment_c
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1068, _size1065) = iprot.readListBegin()
-           for _i1069 in xrange(_size1065):
-             _elem1070 = iprot.readString()
-             self.part_vals.append(_elem1070)
 -          (_etype1031, _size1028) = iprot.readListBegin()
 -          for _i1032 in xrange(_size1028):
 -            _elem1033 = iprot.readString()
 -            self.part_vals.append(_elem1033)
++          (_etype1045, _size1042) = iprot.readListBegin()
++          for _i1046 in xrange(_size1042):
++            _elem1047 = iprot.readString()
++            self.part_vals.append(_elem1047)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -25366,8 -24690,8 +25349,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1071 in self.part_vals:
-         oprot.writeString(iter1071)
 -      for iter1034 in self.part_vals:
 -        oprot.writeString(iter1034)
++      for iter1048 in self.part_vals:
++        oprot.writeString(iter1048)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.deleteData is not None:
@@@ -26104,10 -25428,10 +26087,10 @@@ class get_partition_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1075, _size1072) = iprot.readListBegin()
-           for _i1076 in xrange(_size1072):
-             _elem1077 = iprot.readString()
-             self.part_vals.append(_elem1077)
 -          (_etype1038, _size1035) = iprot.readListBegin()
 -          for _i1039 in xrange(_size1035):
 -            _elem1040 = iprot.readString()
 -            self.part_vals.append(_elem1040)
++          (_etype1052, _size1049) = iprot.readListBegin()
++          for _i1053 in xrange(_size1049):
++            _elem1054 = iprot.readString()
++            self.part_vals.append(_elem1054)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26132,8 -25456,8 +26115,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1078 in self.part_vals:
-         oprot.writeString(iter1078)
 -      for iter1041 in self.part_vals:
 -        oprot.writeString(iter1041)
++      for iter1055 in self.part_vals:
++        oprot.writeString(iter1055)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -26292,11 -25616,11 +26275,11 @@@ class exchange_partition_args
        if fid == 1:
          if ftype == TType.MAP:
            self.partitionSpecs = {}
-           (_ktype1080, _vtype1081, _size1079 ) = iprot.readMapBegin()
-           for _i1083 in xrange(_size1079):
-             _key1084 = iprot.readString()
-             _val1085 = iprot.readString()
-             self.partitionSpecs[_key1084] = _val1085
 -          (_ktype1043, _vtype1044, _size1042 ) = iprot.readMapBegin()
 -          for _i1046 in xrange(_size1042):
 -            _key1047 = iprot.readString()
 -            _val1048 = iprot.readString()
 -            self.partitionSpecs[_key1047] = _val1048
++          (_ktype1057, _vtype1058, _size1056 ) = iprot.readMapBegin()
++          for _i1060 in xrange(_size1056):
++            _key1061 = iprot.readString()
++            _val1062 = iprot.readString()
++            self.partitionSpecs[_key1061] = _val1062
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -26333,9 -25657,9 +26316,9 @@@
      if self.partitionSpecs is not None:
        oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-       for kiter1086,viter1087 in self.partitionSpecs.items():
-         oprot.writeString(kiter1086)
-         oprot.writeString(viter1087)
 -      for kiter1049,viter1050 in self.partitionSpecs.items():
 -        oprot.writeString(kiter1049)
 -        oprot.writeString(viter1050)
++      for kiter1063,viter1064 in self.partitionSpecs.items():
++        oprot.writeString(kiter1063)
++        oprot.writeString(viter1064)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.source_db is not None:
@@@ -26540,11 -25864,11 +26523,11 @@@ class exchange_partitions_args
        if fid == 1:
          if ftype == TType.MAP:
            self.partitionSpecs = {}
-           (_ktype1089, _vtype1090, _size1088 ) = iprot.readMapBegin()
-           for _i1092 in xrange(_size1088):
-             _key1093 = iprot.readString()
-             _val1094 = iprot.readString()
-             self.partitionSpecs[_key1093] = _val1094
 -          (_ktype1052, _vtype1053, _size1051 ) = iprot.readMapBegin()
 -          for _i1055 in xrange(_size1051):
 -            _key1056 = iprot.readString()
 -            _val1057 = iprot.readString()
 -            self.partitionSpecs[_key1056] = _val1057
++          (_ktype1066, _vtype1067, _size1065 ) = iprot.readMapBegin()
++          for _i1069 in xrange(_size1065):
++            _key1070 = iprot.readString()
++            _val1071 = iprot.readString()
++            self.partitionSpecs[_key1070] = _val1071
            iprot.readMapEnd()
          else:
            iprot.skip(ftype)
@@@ -26581,9 -25905,9 +26564,9 @@@
      if self.partitionSpecs is not None:
        oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1)
        oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs))
-       for kiter1095,viter1096 in self.partitionSpecs.items():
-         oprot.writeString(kiter1095)
-         oprot.writeString(viter1096)
 -      for kiter1058,viter1059 in self.partitionSpecs.items():
 -        oprot.writeString(kiter1058)
 -        oprot.writeString(viter1059)
++      for kiter1072,viter1073 in self.partitionSpecs.items():
++        oprot.writeString(kiter1072)
++        oprot.writeString(viter1073)
        oprot.writeMapEnd()
        oprot.writeFieldEnd()
      if self.source_db is not None:
@@@ -26666,11 -25990,11 +26649,11 @@@ class exchange_partitions_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1100, _size1097) = iprot.readListBegin()
-           for _i1101 in xrange(_size1097):
-             _elem1102 = Partition()
-             _elem1102.read(iprot)
-             self.success.append(_elem1102)
 -          (_etype1063, _size1060) = iprot.readListBegin()
 -          for _i1064 in xrange(_size1060):
 -            _elem1065 = Partition()
 -            _elem1065.read(iprot)
 -            self.success.append(_elem1065)
++          (_etype1077, _size1074) = iprot.readListBegin()
++          for _i1078 in xrange(_size1074):
++            _elem1079 = Partition()
++            _elem1079.read(iprot)
++            self.success.append(_elem1079)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26711,8 -26035,8 +26694,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1103 in self.success:
-         iter1103.write(oprot)
 -      for iter1066 in self.success:
 -        iter1066.write(oprot)
++      for iter1080 in self.success:
++        iter1080.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -26806,10 -26130,10 +26789,10 @@@ class get_partition_with_auth_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1107, _size1104) = iprot.readListBegin()
-           for _i1108 in xrange(_size1104):
-             _elem1109 = iprot.readString()
-             self.part_vals.append(_elem1109)
 -          (_etype1070, _size1067) = iprot.readListBegin()
 -          for _i1071 in xrange(_size1067):
 -            _elem1072 = iprot.readString()
 -            self.part_vals.append(_elem1072)
++          (_etype1084, _size1081) = iprot.readListBegin()
++          for _i1085 in xrange(_size1081):
++            _elem1086 = iprot.readString()
++            self.part_vals.append(_elem1086)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26821,10 -26145,10 +26804,10 @@@
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
-           (_etype1113, _size1110) = iprot.readListBegin()
-           for _i1114 in xrange(_size1110):
-             _elem1115 = iprot.readString()
-             self.group_names.append(_elem1115)
 -          (_etype1076, _size1073) = iprot.readListBegin()
 -          for _i1077 in xrange(_size1073):
 -            _elem1078 = iprot.readString()
 -            self.group_names.append(_elem1078)
++          (_etype1090, _size1087) = iprot.readListBegin()
++          for _i1091 in xrange(_size1087):
++            _elem1092 = iprot.readString()
++            self.group_names.append(_elem1092)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -26849,8 -26173,8 +26832,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1116 in self.part_vals:
-         oprot.writeString(iter1116)
 -      for iter1079 in self.part_vals:
 -        oprot.writeString(iter1079)
++      for iter1093 in self.part_vals:
++        oprot.writeString(iter1093)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.user_name is not None:
@@@ -26860,8 -26184,8 +26843,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
-       for iter1117 in self.group_names:
-         oprot.writeString(iter1117)
 -      for iter1080 in self.group_names:
 -        oprot.writeString(iter1080)
++      for iter1094 in self.group_names:
++        oprot.writeString(iter1094)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -27290,11 -26614,11 +27273,11 @@@ class get_partitions_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1121, _size1118) = iprot.readListBegin()
-           for _i1122 in xrange(_size1118):
-             _elem1123 = Partition()
-             _elem1123.read(iprot)
-             self.success.append(_elem1123)
 -          (_etype1084, _size1081) = iprot.readListBegin()
 -          for _i1085 in xrange(_size1081):
 -            _elem1086 = Partition()
 -            _elem1086.read(iprot)
 -            self.success.append(_elem1086)
++          (_etype1098, _size1095) = iprot.readListBegin()
++          for _i1099 in xrange(_size1095):
++            _elem1100 = Partition()
++            _elem1100.read(iprot)
++            self.success.append(_elem1100)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -27323,8 -26647,8 +27306,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1124 in self.success:
-         iter1124.write(oprot)
 -      for iter1087 in self.success:
 -        iter1087.write(oprot)
++      for iter1101 in self.success:
++        iter1101.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -27418,10 -26742,10 +27401,10 @@@ class get_partitions_with_auth_args
        elif fid == 5:
          if ftype == TType.LIST:
            self.group_names = []
-           (_etype1128, _size1125) = iprot.readListBegin()
-           for _i1129 in xrange(_size1125):
-             _elem1130 = iprot.readString()
-             self.group_names.append(_elem1130)
 -          (_etype1091, _size1088) = iprot.readListBegin()
 -          for _i1092 in xrange(_size1088):
 -            _elem1093 = iprot.readString()
 -            self.group_names.append(_elem1093)
++          (_etype1105, _size1102) = iprot.readListBegin()
++          for _i1106 in xrange(_size1102):
++            _elem1107 = iprot.readString()
++            self.group_names.append(_elem1107)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -27454,8 -26778,8 +27437,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 5)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
-       for iter1131 in self.group_names:
-         oprot.writeString(iter1131)
 -      for iter1094 in self.group_names:
 -        oprot.writeString(iter1094)
++      for iter1108 in self.group_names:
++        oprot.writeString(iter1108)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -27516,11 -26840,11 +27499,11 @@@ class get_partitions_with_auth_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1135, _size1132) = iprot.readListBegin()
-           for _i1136 in xrange(_size1132):
-             _elem1137 = Partition()
-             _elem1137.read(iprot)
-             self.success.append(_elem1137)
 -          (_etype1098, _size1095) = iprot.readListBegin()
 -          for _i1099 in xrange(_size1095):
 -            _elem1100 = Partition()
 -            _elem1100.read(iprot)
 -            self.success.append(_elem1100)
++          (_etype1112, _size1109) = iprot.readListBegin()
++          for _i1113 in xrange(_size1109):
++            _elem1114 = Partition()
++            _elem1114.read(iprot)
++            self.success.append(_elem1114)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -27549,8 -26873,8 +27532,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1138 in self.success:
-         iter1138.write(oprot)
 -      for iter1101 in self.success:
 -        iter1101.write(oprot)
++      for iter1115 in self.success:
++        iter1115.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -27708,11 -27032,11 +27691,11 @@@ class get_partitions_pspec_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1142, _size1139) = iprot.readListBegin()
-           for _i1143 in xrange(_size1139):
-             _elem1144 = PartitionSpec()
-             _elem1144.read(iprot)
-             self.success.append(_elem1144)
 -          (_etype1105, _size1102) = iprot.readListBegin()
 -          for _i1106 in xrange(_size1102):
 -            _elem1107 = PartitionSpec()
 -            _elem1107.read(iprot)
 -            self.success.append(_elem1107)
++          (_etype1119, _size1116) = iprot.readListBegin()
++          for _i1120 in xrange(_size1116):
++            _elem1121 = PartitionSpec()
++            _elem1121.read(iprot)
++            self.success.append(_elem1121)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -27741,8 -27065,8 +27724,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1145 in self.success:
-         iter1145.write(oprot)
 -      for iter1108 in self.success:
 -        iter1108.write(oprot)
++      for iter1122 in self.success:
++        iter1122.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -27900,10 -27224,10 +27883,10 @@@ class get_partition_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1149, _size1146) = iprot.readListBegin()
-           for _i1150 in xrange(_size1146):
-             _elem1151 = iprot.readString()
-             self.success.append(_elem1151)
 -          (_etype1112, _size1109) = iprot.readListBegin()
 -          for _i1113 in xrange(_size1109):
 -            _elem1114 = iprot.readString()
 -            self.success.append(_elem1114)
++          (_etype1126, _size1123) = iprot.readListBegin()
++          for _i1127 in xrange(_size1123):
++            _elem1128 = iprot.readString()
++            self.success.append(_elem1128)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -27932,8 -27256,8 +27915,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter1152 in self.success:
-         oprot.writeString(iter1152)
 -      for iter1115 in self.success:
 -        oprot.writeString(iter1115)
++      for iter1129 in self.success:
++        oprot.writeString(iter1129)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -28173,10 -27497,10 +28156,10 @@@ class get_partitions_ps_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1156, _size1153) = iprot.readListBegin()
-           for _i1157 in xrange(_size1153):
-             _elem1158 = iprot.readString()
-             self.part_vals.append(_elem1158)
 -          (_etype1119, _size1116) = iprot.readListBegin()
 -          for _i1120 in xrange(_size1116):
 -            _elem1121 = iprot.readString()
 -            self.part_vals.append(_elem1121)
++          (_etype1133, _size1130) = iprot.readListBegin()
++          for _i1134 in xrange(_size1130):
++            _elem1135 = iprot.readString()
++            self.part_vals.append(_elem1135)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28206,8 -27530,8 +28189,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1159 in self.part_vals:
-         oprot.writeString(iter1159)
 -      for iter1122 in self.part_vals:
 -        oprot.writeString(iter1122)
++      for iter1136 in self.part_vals:
++        oprot.writeString(iter1136)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -28271,11 -27595,11 +28254,11 @@@ class get_partitions_ps_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1163, _size1160) = iprot.readListBegin()
-           for _i1164 in xrange(_size1160):
-             _elem1165 = Partition()
-             _elem1165.read(iprot)
-             self.success.append(_elem1165)
 -          (_etype1126, _size1123) = iprot.readListBegin()
 -          for _i1127 in xrange(_size1123):
 -            _elem1128 = Partition()
 -            _elem1128.read(iprot)
 -            self.success.append(_elem1128)
++          (_etype1140, _size1137) = iprot.readListBegin()
++          for _i1141 in xrange(_size1137):
++            _elem1142 = Partition()
++            _elem1142.read(iprot)
++            self.success.append(_elem1142)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28304,8 -27628,8 +28287,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1166 in self.success:
-         iter1166.write(oprot)
 -      for iter1129 in self.success:
 -        iter1129.write(oprot)
++      for iter1143 in self.success:
++        iter1143.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -28392,10 -27716,10 +28375,10 @@@ class get_partitions_ps_with_auth_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1170, _size1167) = iprot.readListBegin()
-           for _i1171 in xrange(_size1167):
-             _elem1172 = iprot.readString()
-             self.part_vals.append(_elem1172)
 -          (_etype1133, _size1130) = iprot.readListBegin()
 -          for _i1134 in xrange(_size1130):
 -            _elem1135 = iprot.readString()
 -            self.part_vals.append(_elem1135)
++          (_etype1147, _size1144) = iprot.readListBegin()
++          for _i1148 in xrange(_size1144):
++            _elem1149 = iprot.readString()
++            self.part_vals.append(_elem1149)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28412,10 -27736,10 +28395,10 @@@
        elif fid == 6:
          if ftype == TType.LIST:
            self.group_names = []
-           (_etype1176, _size1173) = iprot.readListBegin()
-           for _i1177 in xrange(_size1173):
-             _elem1178 = iprot.readString()
-             self.group_names.append(_elem1178)
 -          (_etype1139, _size1136) = iprot.readListBegin()
 -          for _i1140 in xrange(_size1136):
 -            _elem1141 = iprot.readString()
 -            self.group_names.append(_elem1141)
++          (_etype1153, _size1150) = iprot.readListBegin()
++          for _i1154 in xrange(_size1150):
++            _elem1155 = iprot.readString()
++            self.group_names.append(_elem1155)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28440,8 -27764,8 +28423,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1179 in self.part_vals:
-         oprot.writeString(iter1179)
 -      for iter1142 in self.part_vals:
 -        oprot.writeString(iter1142)
++      for iter1156 in self.part_vals:
++        oprot.writeString(iter1156)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -28455,8 -27779,8 +28438,8 @@@
      if self.group_names is not None:
        oprot.writeFieldBegin('group_names', TType.LIST, 6)
        oprot.writeListBegin(TType.STRING, len(self.group_names))
-       for iter1180 in self.group_names:
-         oprot.writeString(iter1180)
 -      for iter1143 in self.group_names:
 -        oprot.writeString(iter1143)
++      for iter1157 in self.group_names:
++        oprot.writeString(iter1157)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -28518,11 -27842,11 +28501,11 @@@ class get_partitions_ps_with_auth_resul
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1184, _size1181) = iprot.readListBegin()
-           for _i1185 in xrange(_size1181):
-             _elem1186 = Partition()
-             _elem1186.read(iprot)
-             self.success.append(_elem1186)
 -          (_etype1147, _size1144) = iprot.readListBegin()
 -          for _i1148 in xrange(_size1144):
 -            _elem1149 = Partition()
 -            _elem1149.read(iprot)
 -            self.success.append(_elem1149)
++          (_etype1161, _size1158) = iprot.readListBegin()
++          for _i1162 in xrange(_size1158):
++            _elem1163 = Partition()
++            _elem1163.read(iprot)
++            self.success.append(_elem1163)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28551,8 -27875,8 +28534,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1187 in self.success:
-         iter1187.write(oprot)
 -      for iter1150 in self.success:
 -        iter1150.write(oprot)
++      for iter1164 in self.success:
++        iter1164.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -28633,10 -27957,10 +28616,10 @@@ class get_partition_names_ps_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1191, _size1188) = iprot.readListBegin()
-           for _i1192 in xrange(_size1188):
-             _elem1193 = iprot.readString()
-             self.part_vals.append(_elem1193)
 -          (_etype1154, _size1151) = iprot.readListBegin()
 -          for _i1155 in xrange(_size1151):
 -            _elem1156 = iprot.readString()
 -            self.part_vals.append(_elem1156)
++          (_etype1168, _size1165) = iprot.readListBegin()
++          for _i1169 in xrange(_size1165):
++            _elem1170 = iprot.readString()
++            self.part_vals.append(_elem1170)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28666,8 -27990,8 +28649,8 @@@
      if self.part_vals is not None:
        oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1194 in self.part_vals:
-         oprot.writeString(iter1194)
 -      for iter1157 in self.part_vals:
 -        oprot.writeString(iter1157)
++      for iter1171 in self.part_vals:
++        oprot.writeString(iter1171)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.max_parts is not None:
@@@ -28731,10 -28055,10 +28714,10 @@@ class get_partition_names_ps_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1198, _size1195) = iprot.readListBegin()
-           for _i1199 in xrange(_size1195):
-             _elem1200 = iprot.readString()
-             self.success.append(_elem1200)
 -          (_etype1161, _size1158) = iprot.readListBegin()
 -          for _i1162 in xrange(_size1158):
 -            _elem1163 = iprot.readString()
 -            self.success.append(_elem1163)
++          (_etype1175, _size1172) = iprot.readListBegin()
++          for _i1176 in xrange(_size1172):
++            _elem1177 = iprot.readString()
++            self.success.append(_elem1177)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28763,8 -28087,8 +28746,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRING, len(self.success))
-       for iter1201 in self.success:
-         oprot.writeString(iter1201)
 -      for iter1164 in self.success:
 -        oprot.writeString(iter1164)
++      for iter1178 in self.success:
++        oprot.writeString(iter1178)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -28935,11 -28259,11 +28918,11 @@@ class get_partitions_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1205, _size1202) = iprot.readListBegin()
-           for _i1206 in xrange(_size1202):
-             _elem1207 = Partition()
-             _elem1207.read(iprot)
-             self.success.append(_elem1207)
 -          (_etype1168, _size1165) = iprot.readListBegin()
 -          for _i1169 in xrange(_size1165):
 -            _elem1170 = Partition()
 -            _elem1170.read(iprot)
 -            self.success.append(_elem1170)
++          (_etype1182, _size1179) = iprot.readListBegin()
++          for _i1183 in xrange(_size1179):
++            _elem1184 = Partition()
++            _elem1184.read(iprot)
++            self.success.append(_elem1184)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -28968,8 -28292,8 +28951,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1208 in self.success:
-         iter1208.write(oprot)
 -      for iter1171 in self.success:
 -        iter1171.write(oprot)
++      for iter1185 in self.success:
++        iter1185.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -29140,11 -28464,11 +29123,11 @@@ class get_part_specs_by_filter_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1212, _size1209) = iprot.readListBegin()
-           for _i1213 in xrange(_size1209):
-             _elem1214 = PartitionSpec()
-             _elem1214.read(iprot)
-             self.success.append(_elem1214)
 -          (_etype1175, _size1172) = iprot.readListBegin()
 -          for _i1176 in xrange(_size1172):
 -            _elem1177 = PartitionSpec()
 -            _elem1177.read(iprot)
 -            self.success.append(_elem1177)
++          (_etype1189, _size1186) = iprot.readListBegin()
++          for _i1190 in xrange(_size1186):
++            _elem1191 = PartitionSpec()
++            _elem1191.read(iprot)
++            self.success.append(_elem1191)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -29173,8 -28497,8 +29156,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1215 in self.success:
-         iter1215.write(oprot)
 -      for iter1178 in self.success:
 -        iter1178.write(oprot)
++      for iter1192 in self.success:
++        iter1192.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -29594,10 -28918,10 +29577,10 @@@ class get_partitions_by_names_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.names = []
-           (_etype1219, _size1216) = iprot.readListBegin()
-           for _i1220 in xrange(_size1216):
-             _elem1221 = iprot.readString()
-             self.names.append(_elem1221)
 -          (_etype1182, _size1179) = iprot.readListBegin()
 -          for _i1183 in xrange(_size1179):
 -            _elem1184 = iprot.readString()
 -            self.names.append(_elem1184)
++          (_etype1196, _size1193) = iprot.readListBegin()
++          for _i1197 in xrange(_size1193):
++            _elem1198 = iprot.readString()
++            self.names.append(_elem1198)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -29622,8 -28946,8 +29605,8 @@@
      if self.names is not None:
        oprot.writeFieldBegin('names', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.names))
-       for iter1222 in self.names:
-         oprot.writeString(iter1222)
 -      for iter1185 in self.names:
 -        oprot.writeString(iter1185)
++      for iter1199 in self.names:
++        oprot.writeString(iter1199)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -29682,11 -29006,11 +29665,11 @@@ class get_partitions_by_names_result
        if fid == 0:
          if ftype == TType.LIST:
            self.success = []
-           (_etype1226, _size1223) = iprot.readListBegin()
-           for _i1227 in xrange(_size1223):
-             _elem1228 = Partition()
-             _elem1228.read(iprot)
-             self.success.append(_elem1228)
 -          (_etype1189, _size1186) = iprot.readListBegin()
 -          for _i1190 in xrange(_size1186):
 -            _elem1191 = Partition()
 -            _elem1191.read(iprot)
 -            self.success.append(_elem1191)
++          (_etype1203, _size1200) = iprot.readListBegin()
++          for _i1204 in xrange(_size1200):
++            _elem1205 = Partition()
++            _elem1205.read(iprot)
++            self.success.append(_elem1205)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -29715,8 -29039,8 +29698,8 @@@
      if self.success is not None:
        oprot.writeFieldBegin('success', TType.LIST, 0)
        oprot.writeListBegin(TType.STRUCT, len(self.success))
-       for iter1229 in self.success:
-         iter1229.write(oprot)
 -      for iter1192 in self.success:
 -        iter1192.write(oprot)
++      for iter1206 in self.success:
++        iter1206.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.o1 is not None:
@@@ -29966,11 -29290,11 +29949,11 @@@ class alter_partitions_args
        elif fid == 3:
          if ftype == TType.LIST:
            self.new_parts = []
-           (_etype1233, _size1230) = iprot.readListBegin()
-           for _i1234 in xrange(_size1230):
-             _elem1235 = Partition()
-             _elem1235.read(iprot)
-             self.new_parts.append(_elem1235)
 -          (_etype1196, _size1193) = iprot.readListBegin()
 -          for _i1197 in xrange(_size1193):
 -            _elem1198 = Partition()
 -            _elem1198.read(iprot)
 -            self.new_parts.append(_elem1198)
++          (_etype1210, _size1207) = iprot.readListBegin()
++          for _i1211 in xrange(_size1207):
++            _elem1212 = Partition()
++            _elem1212.read(iprot)
++            self.new_parts.append(_elem1212)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -29995,8 -29319,8 +29978,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-       for iter1236 in self.new_parts:
-         iter1236.write(oprot)
 -      for iter1199 in self.new_parts:
 -        iter1199.write(oprot)
++      for iter1213 in self.new_parts:
++        iter1213.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
@@@ -30149,11 -29473,11 +30132,11 @@@ class alter_partitions_with_environment
        elif fid == 3:
          if ftype == TType.LIST:
            self.new_parts = []
-           (_etype1240, _size1237) = iprot.readListBegin()
-           for _i1241 in xrange(_size1237):
-             _elem1242 = Partition()
-             _elem1242.read(iprot)
-             self.new_parts.append(_elem1242)
 -          (_etype1203, _size1200) = iprot.readListBegin()
 -          for _i1204 in xrange(_size1200):
 -            _elem1205 = Partition()
 -            _elem1205.read(iprot)
 -            self.new_parts.append(_elem1205)
++          (_etype1217, _size1214) = iprot.readListBegin()
++          for _i1218 in xrange(_size1214):
++            _elem1219 = Partition()
++            _elem1219.read(iprot)
++            self.new_parts.append(_elem1219)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -30184,8 -29508,8 +30167,8 @@@
      if self.new_parts is not None:
        oprot.writeFieldBegin('new_parts', TType.LIST, 3)
        oprot.writeListBegin(TType.STRUCT, len(self.new_parts))
-       for iter1243 in self.new_parts:
-         iter1243.write(oprot)
 -      for iter1206 in self.new_parts:
 -        iter1206.write(oprot)
++      for iter1220 in self.new_parts:
++        iter1220.write(oprot)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
      if self.environment_context is not None:
@@@ -30676,22 -30028,12 +30659,22 @@@ class rename_partition_args
        if ftype == TType.STOP:
          break
        if fid == 1:
 +        if ftype == TType.STRING:
 +          self.db_name = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 2:
 +        if ftype == TType.STRING:
 +          self.tbl_name = iprot.readString()
 +        else:
 +          iprot.skip(ftype)
 +      elif fid == 3:
          if ftype == TType.LIST:
            self.part_vals = []
-           (_etype1247, _size1244) = iprot.readListBegin()
-           for _i1248 in xrange(_size1244):
-             _elem1249 = iprot.readString()
-             self.part_vals.append(_elem1249)
 -          (_etype1217, _size1214) = iprot.readListBegin()
 -          for _i1218 in xrange(_size1214):
 -            _elem1219 = iprot.readString()
 -            self.part_vals.append(_elem1219)
++          (_etype1224, _size1221) = iprot.readListBegin()
++          for _i1225 in xrange(_size1221):
++            _elem1226 = iprot.readString()
++            self.part_vals.append(_elem1226)
            iprot.readListEnd()
          else:
            iprot.skip(ftype)
@@@ -30710,25 -30051,17 +30693,25 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('partition_name_has_valid_characters_args')
 +    oprot.writeStructBegin('rename_partition_args')
 +    if self.db_name is not None:
 +      oprot.writeFieldBegin('db_name', TType.STRING, 1)
 +      oprot.writeString(self.db_name)
 +      oprot.writeFieldEnd()
 +    if self.tbl_name is not None:
 +      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
 +      oprot.writeString(self.tbl_name)
 +      oprot.writeFieldEnd()
      if self.part_vals is not None:
 -      oprot.writeFieldBegin('part_vals', TType.LIST, 1)
 +      oprot.writeFieldBegin('part_vals', TType.LIST, 3)
        oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1250 in self.part_vals:
-         oprot.writeString(iter1250)
 -      for iter1220 in self.part_vals:
 -        oprot.writeString(iter1220)
++      for iter1227 in self.part_vals:
++        oprot.writeString(iter1227)
        oprot.writeListEnd()
        oprot.writeFieldEnd()
 -    if self.throw_exception is not None:
 -      oprot.writeFieldBegin('throw_exception', TType.BOOL, 2)
 -      oprot.writeBool(self.throw_exception)
 +    if self.new_part is not None:
 +      oprot.writeFieldBegin('new_part', TType.STRUCT, 4)
 +      self.new_part.write(oprot)
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()
@@@ -30863,13 -30192,8 +30846,13 @@@ class partition_name_has_valid_characte
        if ftype == TType.STOP:
          break
        if fid == 1:
 -        if ftype == TType.STRING:
 -          self.name = iprot.readString()
 +        if ftype == TType.LIST:
 +          self.part_vals = []
-           (_etype1254, _size1251) = iprot.readListBegin()
-           for _i1255 in xrange(_size1251):
-             _elem1256 = iprot.readString()
-             self.part_vals.append(_elem1256)
++          (_etype1231, _size1228) = iprot.readListBegin()
++          for _i1232 in xrange(_size1228):
++            _elem1233 = iprot.readString()
++            self.part_vals.append(_elem1233)
 +          iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 2:
@@@ -30886,17 -30210,14 +30869,17 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('get_config_value_args')
 -    if self.name is not None:
 -      oprot.writeFieldBegin('name', TType.STRING, 1)
 -      oprot.writeString(self.name)
 +    oprot.writeStructBegin('partition_name_has_valid_characters_args')
 +    if self.part_vals is not None:
 +      oprot.writeFieldBegin('part_vals', TType.LIST, 1)
 +      oprot.writeListBegin(TType.STRING, len(self.part_vals))
-       for iter1257 in self.part_vals:
-         oprot.writeString(iter1257)
++      for iter1234 in self.part_vals:
++        oprot.writeString(iter1234)
 +      oprot.writeListEnd()
        oprot.writeFieldEnd()
 -    if self.defaultValue is not None:
 -      oprot.writeFieldBegin('defaultValue', TType.STRING, 2)
 -      oprot.writeString(self.defaultValue)
 +    if self.throw_exception is not None:
 +      oprot.writeFieldBegin('throw_exception', TType.BOOL, 2)
 +      oprot.writeBool(self.throw_exception)
        oprot.writeFieldEnd()
      oprot.writeFieldStop()
      oprot.writeStructEnd()
@@@ -31247,13 -30563,14 +31230,13 @@@ class partition_name_to_vals_result
        if ftype == TType.STOP:
          break
        if fid == 0:
 -        if ftype == TType.MAP:
 -          self.success = {}
 -          (_ktype1229, _vtype1230, _size1228 ) = iprot.readMapBegin()
 -          for _i1232 in xrange(_size1228):
 -            _key1233 = iprot.readString()
 -            _val1234 = iprot.readString()
 -            self.success[_key1233] = _val1234
 -          iprot.readMapEnd()
 +        if ftype == TType.LIST:
 +          self.success = []
-           (_etype1261, _size1258) = iprot.readListBegin()
-           for _i1262 in xrange(_size1258):
-             _elem1263 = iprot.readString()
-             self.success.append(_elem1263)
++          (_etype1238, _size1235) = iprot.readListBegin()
++          for _i1239 in xrange(_size1235):
++            _elem1240 = iprot.readString()
++            self.success.append(_elem1240)
 +          iprot.readListEnd()
          else:
            iprot.skip(ftype)
        elif fid == 1:
@@@ -31271,13 -30588,14 +31254,13 @@@
      if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
 -    oprot.writeStructBegin('partition_name_to_spec_result')
 +    oprot.writeStructBegin('partition_name_to_vals_result')
      if self.success is not None:
 -      oprot.writeFieldBegin('success', TType.MAP, 0)
 -      oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success))
 -      for kiter1235,viter1236 in self.success.items():
 -        oprot.writeString(kiter1235)
 -        oprot.writeString(viter1236)
 -      oprot.writeMapEnd()
 +      oprot.writeFieldBegin('success', TType.LIST, 0)
 +      oprot.writeListBegin(TType.STRING, len(

<TRUNCATED>

[47/48] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0718

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/37a1907b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index fc3da46,24ffadb..af77e0e
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@@ -43189,13 -42384,13 +43189,13 @@@ import org.slf4j.LoggerFactory
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list968.size);
-                   String _elem969;
-                   for (int _i970 = 0; _i970 < _list968.size; ++_i970)
 -                  org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
 -                  struct.success = new ArrayList<String>(_list944.size);
 -                  String _elem945;
 -                  for (int _i946 = 0; _i946 < _list944.size; ++_i946)
++                  org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list960.size);
++                  String _elem961;
++                  for (int _i962 = 0; _i962 < _list960.size; ++_i962)
                    {
-                     _elem969 = iprot.readString();
-                     struct.success.add(_elem969);
 -                    _elem945 = iprot.readString();
 -                    struct.success.add(_elem945);
++                    _elem961 = iprot.readString();
++                    struct.success.add(_elem961);
                    }
                    iprot.readListEnd();
                  }
@@@ -43230,9 -42425,9 +43230,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter971 : struct.success)
 -            for (String _iter947 : struct.success)
++            for (String _iter963 : struct.success)
              {
-               oprot.writeString(_iter971);
 -              oprot.writeString(_iter947);
++              oprot.writeString(_iter963);
              }
              oprot.writeListEnd();
            }
@@@ -43271,9 -42466,9 +43271,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (String _iter972 : struct.success)
 -            for (String _iter948 : struct.success)
++            for (String _iter964 : struct.success)
              {
-               oprot.writeString(_iter972);
 -              oprot.writeString(_iter948);
++              oprot.writeString(_iter964);
              }
            }
          }
@@@ -43288,13 -42483,13 +43288,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.success = new ArrayList<String>(_list973.size);
-             String _elem974;
-             for (int _i975 = 0; _i975 < _list973.size; ++_i975)
 -            org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.success = new ArrayList<String>(_list949.size);
 -            String _elem950;
 -            for (int _i951 = 0; _i951 < _list949.size; ++_i951)
++            org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list965.size);
++            String _elem966;
++            for (int _i967 = 0; _i967 < _list965.size; ++_i967)
              {
-               _elem974 = iprot.readString();
-               struct.success.add(_elem974);
 -              _elem950 = iprot.readString();
 -              struct.success.add(_elem950);
++              _elem966 = iprot.readString();
++              struct.success.add(_elem966);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -43948,13 -43143,13 +43948,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list976 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list976.size);
-                   String _elem977;
-                   for (int _i978 = 0; _i978 < _list976.size; ++_i978)
 -                  org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
 -                  struct.success = new ArrayList<String>(_list952.size);
 -                  String _elem953;
 -                  for (int _i954 = 0; _i954 < _list952.size; ++_i954)
++                  org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list968.size);
++                  String _elem969;
++                  for (int _i970 = 0; _i970 < _list968.size; ++_i970)
                    {
-                     _elem977 = iprot.readString();
-                     struct.success.add(_elem977);
 -                    _elem953 = iprot.readString();
 -                    struct.success.add(_elem953);
++                    _elem969 = iprot.readString();
++                    struct.success.add(_elem969);
                    }
                    iprot.readListEnd();
                  }
@@@ -43989,9 -43184,9 +43989,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter979 : struct.success)
 -            for (String _iter955 : struct.success)
++            for (String _iter971 : struct.success)
              {
-               oprot.writeString(_iter979);
 -              oprot.writeString(_iter955);
++              oprot.writeString(_iter971);
              }
              oprot.writeListEnd();
            }
@@@ -44030,9 -43225,9 +44030,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (String _iter980 : struct.success)
 -            for (String _iter956 : struct.success)
++            for (String _iter972 : struct.success)
              {
-               oprot.writeString(_iter980);
 -              oprot.writeString(_iter956);
++              oprot.writeString(_iter972);
              }
            }
          }
@@@ -44047,13 -43242,13 +44047,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.success = new ArrayList<String>(_list981.size);
-             String _elem982;
-             for (int _i983 = 0; _i983 < _list981.size; ++_i983)
 -            org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.success = new ArrayList<String>(_list957.size);
 -            String _elem958;
 -            for (int _i959 = 0; _i959 < _list957.size; ++_i959)
++            org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list973.size);
++            String _elem974;
++            for (int _i975 = 0; _i975 < _list973.size; ++_i975)
              {
-               _elem982 = iprot.readString();
-               struct.success.add(_elem982);
 -              _elem958 = iprot.readString();
 -              struct.success.add(_elem958);
++              _elem974 = iprot.readString();
++              struct.success.add(_elem974);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -48660,16 -47855,16 +48660,16 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                  {
-                   org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin();
-                   struct.success = new HashMap<String,Type>(2*_map984.size);
-                   String _key985;
-                   Type _val986;
-                   for (int _i987 = 0; _i987 < _map984.size; ++_i987)
 -                  org.apache.thrift.protocol.TMap _map960 = iprot.readMapBegin();
 -                  struct.success = new HashMap<String,Type>(2*_map960.size);
 -                  String _key961;
 -                  Type _val962;
 -                  for (int _i963 = 0; _i963 < _map960.size; ++_i963)
++                  org.apache.thrift.protocol.TMap _map976 = iprot.readMapBegin();
++                  struct.success = new HashMap<String,Type>(2*_map976.size);
++                  String _key977;
++                  Type _val978;
++                  for (int _i979 = 0; _i979 < _map976.size; ++_i979)
                    {
-                     _key985 = iprot.readString();
-                     _val986 = new Type();
-                     _val986.read(iprot);
-                     struct.success.put(_key985, _val986);
 -                    _key961 = iprot.readString();
 -                    _val962 = new Type();
 -                    _val962.read(iprot);
 -                    struct.success.put(_key961, _val962);
++                    _key977 = iprot.readString();
++                    _val978 = new Type();
++                    _val978.read(iprot);
++                    struct.success.put(_key977, _val978);
                    }
                    iprot.readMapEnd();
                  }
@@@ -48704,10 -47899,10 +48704,10 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (Map.Entry<String, Type> _iter988 : struct.success.entrySet())
 -            for (Map.Entry<String, Type> _iter964 : struct.success.entrySet())
++            for (Map.Entry<String, Type> _iter980 : struct.success.entrySet())
              {
-               oprot.writeString(_iter988.getKey());
-               _iter988.getValue().write(oprot);
 -              oprot.writeString(_iter964.getKey());
 -              _iter964.getValue().write(oprot);
++              oprot.writeString(_iter980.getKey());
++              _iter980.getValue().write(oprot);
              }
              oprot.writeMapEnd();
            }
@@@ -48746,10 -47941,10 +48746,10 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (Map.Entry<String, Type> _iter989 : struct.success.entrySet())
 -            for (Map.Entry<String, Type> _iter965 : struct.success.entrySet())
++            for (Map.Entry<String, Type> _iter981 : struct.success.entrySet())
              {
-               oprot.writeString(_iter989.getKey());
-               _iter989.getValue().write(oprot);
 -              oprot.writeString(_iter965.getKey());
 -              _iter965.getValue().write(oprot);
++              oprot.writeString(_iter981.getKey());
++              _iter981.getValue().write(oprot);
              }
            }
          }
@@@ -48764,16 -47959,16 +48764,16 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new HashMap<String,Type>(2*_map990.size);
-             String _key991;
-             Type _val992;
-             for (int _i993 = 0; _i993 < _map990.size; ++_i993)
 -            org.apache.thrift.protocol.TMap _map966 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new HashMap<String,Type>(2*_map966.size);
 -            String _key967;
 -            Type _val968;
 -            for (int _i969 = 0; _i969 < _map966.size; ++_i969)
++            org.apache.thrift.protocol.TMap _map982 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new HashMap<String,Type>(2*_map982.size);
++            String _key983;
++            Type _val984;
++            for (int _i985 = 0; _i985 < _map982.size; ++_i985)
              {
-               _key991 = iprot.readString();
-               _val992 = new Type();
-               _val992.read(iprot);
-               struct.success.put(_key991, _val992);
 -              _key967 = iprot.readString();
 -              _val968 = new Type();
 -              _val968.read(iprot);
 -              struct.success.put(_key967, _val968);
++              _key983 = iprot.readString();
++              _val984 = new Type();
++              _val984.read(iprot);
++              struct.success.put(_key983, _val984);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -49808,14 -49003,14 +49808,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
-                   struct.success = new ArrayList<FieldSchema>(_list994.size);
-                   FieldSchema _elem995;
-                   for (int _i996 = 0; _i996 < _list994.size; ++_i996)
 -                  org.apache.thrift.protocol.TList _list970 = iprot.readListBegin();
 -                  struct.success = new ArrayList<FieldSchema>(_list970.size);
 -                  FieldSchema _elem971;
 -                  for (int _i972 = 0; _i972 < _list970.size; ++_i972)
++                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
++                  struct.success = new ArrayList<FieldSchema>(_list986.size);
++                  FieldSchema _elem987;
++                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
                    {
-                     _elem995 = new FieldSchema();
-                     _elem995.read(iprot);
-                     struct.success.add(_elem995);
 -                    _elem971 = new FieldSchema();
 -                    _elem971.read(iprot);
 -                    struct.success.add(_elem971);
++                    _elem987 = new FieldSchema();
++                    _elem987.read(iprot);
++                    struct.success.add(_elem987);
                    }
                    iprot.readListEnd();
                  }
@@@ -49868,9 -49063,9 +49868,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (FieldSchema _iter997 : struct.success)
 -            for (FieldSchema _iter973 : struct.success)
++            for (FieldSchema _iter989 : struct.success)
              {
-               _iter997.write(oprot);
 -              _iter973.write(oprot);
++              _iter989.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -49925,9 -49120,9 +49925,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (FieldSchema _iter998 : struct.success)
 -            for (FieldSchema _iter974 : struct.success)
++            for (FieldSchema _iter990 : struct.success)
              {
-               _iter998.write(oprot);
 -              _iter974.write(oprot);
++              _iter990.write(oprot);
              }
            }
          }
@@@ -49948,14 -49143,14 +49948,14 @@@
          BitSet incoming = iprot.readBitSet(4);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new ArrayList<FieldSchema>(_list999.size);
-             FieldSchema _elem1000;
-             for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
 -            org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new ArrayList<FieldSchema>(_list975.size);
 -            FieldSchema _elem976;
 -            for (int _i977 = 0; _i977 < _list975.size; ++_i977)
++            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<FieldSchema>(_list991.size);
++            FieldSchema _elem992;
++            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
              {
-               _elem1000 = new FieldSchema();
-               _elem1000.read(iprot);
-               struct.success.add(_elem1000);
 -              _elem976 = new FieldSchema();
 -              _elem976.read(iprot);
 -              struct.success.add(_elem976);
++              _elem992 = new FieldSchema();
++              _elem992.read(iprot);
++              struct.success.add(_elem992);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -51109,14 -50304,14 +51109,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
-                   struct.success = new ArrayList<FieldSchema>(_list1002.size);
-                   FieldSchema _elem1003;
-                   for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
 -                  org.apache.thrift.protocol.TList _list978 = iprot.readListBegin();
 -                  struct.success = new ArrayList<FieldSchema>(_list978.size);
 -                  FieldSchema _elem979;
 -                  for (int _i980 = 0; _i980 < _list978.size; ++_i980)
++                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
++                  struct.success = new ArrayList<FieldSchema>(_list994.size);
++                  FieldSchema _elem995;
++                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
                    {
-                     _elem1003 = new FieldSchema();
-                     _elem1003.read(iprot);
-                     struct.success.add(_elem1003);
 -                    _elem979 = new FieldSchema();
 -                    _elem979.read(iprot);
 -                    struct.success.add(_elem979);
++                    _elem995 = new FieldSchema();
++                    _elem995.read(iprot);
++                    struct.success.add(_elem995);
                    }
                    iprot.readListEnd();
                  }
@@@ -51169,9 -50364,9 +51169,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (FieldSchema _iter1005 : struct.success)
 -            for (FieldSchema _iter981 : struct.success)
++            for (FieldSchema _iter997 : struct.success)
              {
-               _iter1005.write(oprot);
 -              _iter981.write(oprot);
++              _iter997.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -51226,9 -50421,9 +51226,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (FieldSchema _iter1006 : struct.success)
 -            for (FieldSchema _iter982 : struct.success)
++            for (FieldSchema _iter998 : struct.success)
              {
-               _iter1006.write(oprot);
 -              _iter982.write(oprot);
++              _iter998.write(oprot);
              }
            }
          }
@@@ -51249,14 -50444,14 +51249,14 @@@
          BitSet incoming = iprot.readBitSet(4);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new ArrayList<FieldSchema>(_list1007.size);
-             FieldSchema _elem1008;
-             for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
 -            org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new ArrayList<FieldSchema>(_list983.size);
 -            FieldSchema _elem984;
 -            for (int _i985 = 0; _i985 < _list983.size; ++_i985)
++            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<FieldSchema>(_list999.size);
++            FieldSchema _elem1000;
++            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
              {
-               _elem1008 = new FieldSchema();
-               _elem1008.read(iprot);
-               struct.success.add(_elem1008);
 -              _elem984 = new FieldSchema();
 -              _elem984.read(iprot);
 -              struct.success.add(_elem984);
++              _elem1000 = new FieldSchema();
++              _elem1000.read(iprot);
++              struct.success.add(_elem1000);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -52301,14 -51496,14 +52301,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
-                   struct.success = new ArrayList<FieldSchema>(_list1010.size);
-                   FieldSchema _elem1011;
-                   for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
 -                  org.apache.thrift.protocol.TList _list986 = iprot.readListBegin();
 -                  struct.success = new ArrayList<FieldSchema>(_list986.size);
 -                  FieldSchema _elem987;
 -                  for (int _i988 = 0; _i988 < _list986.size; ++_i988)
++                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
++                  struct.success = new ArrayList<FieldSchema>(_list1002.size);
++                  FieldSchema _elem1003;
++                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
                    {
-                     _elem1011 = new FieldSchema();
-                     _elem1011.read(iprot);
-                     struct.success.add(_elem1011);
 -                    _elem987 = new FieldSchema();
 -                    _elem987.read(iprot);
 -                    struct.success.add(_elem987);
++                    _elem1003 = new FieldSchema();
++                    _elem1003.read(iprot);
++                    struct.success.add(_elem1003);
                    }
                    iprot.readListEnd();
                  }
@@@ -52361,9 -51556,9 +52361,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (FieldSchema _iter1013 : struct.success)
 -            for (FieldSchema _iter989 : struct.success)
++            for (FieldSchema _iter1005 : struct.success)
              {
-               _iter1013.write(oprot);
 -              _iter989.write(oprot);
++              _iter1005.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -52418,9 -51613,9 +52418,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (FieldSchema _iter1014 : struct.success)
 -            for (FieldSchema _iter990 : struct.success)
++            for (FieldSchema _iter1006 : struct.success)
              {
-               _iter1014.write(oprot);
 -              _iter990.write(oprot);
++              _iter1006.write(oprot);
              }
            }
          }
@@@ -52441,14 -51636,14 +52441,14 @@@
          BitSet incoming = iprot.readBitSet(4);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new ArrayList<FieldSchema>(_list1015.size);
-             FieldSchema _elem1016;
-             for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
 -            org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new ArrayList<FieldSchema>(_list991.size);
 -            FieldSchema _elem992;
 -            for (int _i993 = 0; _i993 < _list991.size; ++_i993)
++            org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<FieldSchema>(_list1007.size);
++            FieldSchema _elem1008;
++            for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009)
              {
-               _elem1016 = new FieldSchema();
-               _elem1016.read(iprot);
-               struct.success.add(_elem1016);
 -              _elem992 = new FieldSchema();
 -              _elem992.read(iprot);
 -              struct.success.add(_elem992);
++              _elem1008 = new FieldSchema();
++              _elem1008.read(iprot);
++              struct.success.add(_elem1008);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -53602,14 -52797,14 +53602,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
-                   struct.success = new ArrayList<FieldSchema>(_list1018.size);
-                   FieldSchema _elem1019;
-                   for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
 -                  org.apache.thrift.protocol.TList _list994 = iprot.readListBegin();
 -                  struct.success = new ArrayList<FieldSchema>(_list994.size);
 -                  FieldSchema _elem995;
 -                  for (int _i996 = 0; _i996 < _list994.size; ++_i996)
++                  org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin();
++                  struct.success = new ArrayList<FieldSchema>(_list1010.size);
++                  FieldSchema _elem1011;
++                  for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012)
                    {
-                     _elem1019 = new FieldSchema();
-                     _elem1019.read(iprot);
-                     struct.success.add(_elem1019);
 -                    _elem995 = new FieldSchema();
 -                    _elem995.read(iprot);
 -                    struct.success.add(_elem995);
++                    _elem1011 = new FieldSchema();
++                    _elem1011.read(iprot);
++                    struct.success.add(_elem1011);
                    }
                    iprot.readListEnd();
                  }
@@@ -53662,9 -52857,9 +53662,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (FieldSchema _iter1021 : struct.success)
 -            for (FieldSchema _iter997 : struct.success)
++            for (FieldSchema _iter1013 : struct.success)
              {
-               _iter1021.write(oprot);
 -              _iter997.write(oprot);
++              _iter1013.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -53719,9 -52914,9 +53719,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (FieldSchema _iter1022 : struct.success)
 -            for (FieldSchema _iter998 : struct.success)
++            for (FieldSchema _iter1014 : struct.success)
              {
-               _iter1022.write(oprot);
 -              _iter998.write(oprot);
++              _iter1014.write(oprot);
              }
            }
          }
@@@ -53742,14 -52937,14 +53742,14 @@@
          BitSet incoming = iprot.readBitSet(4);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new ArrayList<FieldSchema>(_list1023.size);
-             FieldSchema _elem1024;
-             for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025)
 -            org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new ArrayList<FieldSchema>(_list999.size);
 -            FieldSchema _elem1000;
 -            for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001)
++            org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<FieldSchema>(_list1015.size);
++            FieldSchema _elem1016;
++            for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017)
              {
-               _elem1024 = new FieldSchema();
-               _elem1024.read(iprot);
-               struct.success.add(_elem1024);
 -              _elem1000 = new FieldSchema();
 -              _elem1000.read(iprot);
 -              struct.success.add(_elem1000);
++              _elem1016 = new FieldSchema();
++              _elem1016.read(iprot);
++              struct.success.add(_elem1016);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -56878,14 -56073,14 +56878,14 @@@
              case 2: // PRIMARY_KEYS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin();
-                   struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1026.size);
-                   SQLPrimaryKey _elem1027;
-                   for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028)
 -                  org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin();
 -                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1002.size);
 -                  SQLPrimaryKey _elem1003;
 -                  for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004)
++                  org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin();
++                  struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1018.size);
++                  SQLPrimaryKey _elem1019;
++                  for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020)
                    {
-                     _elem1027 = new SQLPrimaryKey();
-                     _elem1027.read(iprot);
-                     struct.primaryKeys.add(_elem1027);
 -                    _elem1003 = new SQLPrimaryKey();
 -                    _elem1003.read(iprot);
 -                    struct.primaryKeys.add(_elem1003);
++                    _elem1019 = new SQLPrimaryKey();
++                    _elem1019.read(iprot);
++                    struct.primaryKeys.add(_elem1019);
                    }
                    iprot.readListEnd();
                  }
@@@ -56897,14 -56092,14 +56897,14 @@@
              case 3: // FOREIGN_KEYS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1029 = iprot.readListBegin();
-                   struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1029.size);
-                   SQLForeignKey _elem1030;
-                   for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
 -                  org.apache.thrift.protocol.TList _list1005 = iprot.readListBegin();
 -                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1005.size);
 -                  SQLForeignKey _elem1006;
 -                  for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007)
++                  org.apache.thrift.protocol.TList _list1021 = iprot.readListBegin();
++                  struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1021.size);
++                  SQLForeignKey _elem1022;
++                  for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
                    {
-                     _elem1030 = new SQLForeignKey();
-                     _elem1030.read(iprot);
-                     struct.foreignKeys.add(_elem1030);
 -                    _elem1006 = new SQLForeignKey();
 -                    _elem1006.read(iprot);
 -                    struct.foreignKeys.add(_elem1006);
++                    _elem1022 = new SQLForeignKey();
++                    _elem1022.read(iprot);
++                    struct.foreignKeys.add(_elem1022);
                    }
                    iprot.readListEnd();
                  }
@@@ -56916,14 -56111,14 +56916,14 @@@
              case 4: // UNIQUE_CONSTRAINTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
-                   struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1032.size);
-                   SQLUniqueConstraint _elem1033;
-                   for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
 -                  org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin();
 -                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1008.size);
 -                  SQLUniqueConstraint _elem1009;
 -                  for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010)
++                  org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
++                  struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1024.size);
++                  SQLUniqueConstraint _elem1025;
++                  for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
                    {
-                     _elem1033 = new SQLUniqueConstraint();
-                     _elem1033.read(iprot);
-                     struct.uniqueConstraints.add(_elem1033);
 -                    _elem1009 = new SQLUniqueConstraint();
 -                    _elem1009.read(iprot);
 -                    struct.uniqueConstraints.add(_elem1009);
++                    _elem1025 = new SQLUniqueConstraint();
++                    _elem1025.read(iprot);
++                    struct.uniqueConstraints.add(_elem1025);
                    }
                    iprot.readListEnd();
                  }
@@@ -56935,14 -56130,14 +56935,14 @@@
              case 5: // NOT_NULL_CONSTRAINTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1035 = iprot.readListBegin();
-                   struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1035.size);
-                   SQLNotNullConstraint _elem1036;
-                   for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037)
 -                  org.apache.thrift.protocol.TList _list1011 = iprot.readListBegin();
 -                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1011.size);
 -                  SQLNotNullConstraint _elem1012;
 -                  for (int _i1013 = 0; _i1013 < _list1011.size; ++_i1013)
++                  org.apache.thrift.protocol.TList _list1027 = iprot.readListBegin();
++                  struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1027.size);
++                  SQLNotNullConstraint _elem1028;
++                  for (int _i1029 = 0; _i1029 < _list1027.size; ++_i1029)
                    {
-                     _elem1036 = new SQLNotNullConstraint();
-                     _elem1036.read(iprot);
-                     struct.notNullConstraints.add(_elem1036);
 -                    _elem1012 = new SQLNotNullConstraint();
 -                    _elem1012.read(iprot);
 -                    struct.notNullConstraints.add(_elem1012);
++                    _elem1028 = new SQLNotNullConstraint();
++                    _elem1028.read(iprot);
++                    struct.notNullConstraints.add(_elem1028);
                    }
                    iprot.readListEnd();
                  }
@@@ -56954,14 -56149,14 +56954,14 @@@
              case 6: // DEFAULT_CONSTRAINTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1038 = iprot.readListBegin();
-                   struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1038.size);
-                   SQLDefaultConstraint _elem1039;
-                   for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040)
 -                  org.apache.thrift.protocol.TList _list1014 = iprot.readListBegin();
 -                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1014.size);
 -                  SQLDefaultConstraint _elem1015;
 -                  for (int _i1016 = 0; _i1016 < _list1014.size; ++_i1016)
++                  org.apache.thrift.protocol.TList _list1030 = iprot.readListBegin();
++                  struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1030.size);
++                  SQLDefaultConstraint _elem1031;
++                  for (int _i1032 = 0; _i1032 < _list1030.size; ++_i1032)
                    {
-                     _elem1039 = new SQLDefaultConstraint();
-                     _elem1039.read(iprot);
-                     struct.defaultConstraints.add(_elem1039);
 -                    _elem1015 = new SQLDefaultConstraint();
 -                    _elem1015.read(iprot);
 -                    struct.defaultConstraints.add(_elem1015);
++                    _elem1031 = new SQLDefaultConstraint();
++                    _elem1031.read(iprot);
++                    struct.defaultConstraints.add(_elem1031);
                    }
                    iprot.readListEnd();
                  }
@@@ -56973,14 -56168,14 +56973,14 @@@
              case 7: // CHECK_CONSTRAINTS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1041 = iprot.readListBegin();
-                   struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1041.size);
-                   SQLCheckConstraint _elem1042;
-                   for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043)
 -                  org.apache.thrift.protocol.TList _list1017 = iprot.readListBegin();
 -                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1017.size);
 -                  SQLCheckConstraint _elem1018;
 -                  for (int _i1019 = 0; _i1019 < _list1017.size; ++_i1019)
++                  org.apache.thrift.protocol.TList _list1033 = iprot.readListBegin();
++                  struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1033.size);
++                  SQLCheckConstraint _elem1034;
++                  for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035)
                    {
-                     _elem1042 = new SQLCheckConstraint();
-                     _elem1042.read(iprot);
-                     struct.checkConstraints.add(_elem1042);
 -                    _elem1018 = new SQLCheckConstraint();
 -                    _elem1018.read(iprot);
 -                    struct.checkConstraints.add(_elem1018);
++                    _elem1034 = new SQLCheckConstraint();
++                    _elem1034.read(iprot);
++                    struct.checkConstraints.add(_elem1034);
                    }
                    iprot.readListEnd();
                  }
@@@ -57011,9 -56206,9 +57011,9 @@@
            oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-             for (SQLPrimaryKey _iter1044 : struct.primaryKeys)
 -            for (SQLPrimaryKey _iter1020 : struct.primaryKeys)
++            for (SQLPrimaryKey _iter1036 : struct.primaryKeys)
              {
-               _iter1044.write(oprot);
 -              _iter1020.write(oprot);
++              _iter1036.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57023,9 -56218,9 +57023,9 @@@
            oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-             for (SQLForeignKey _iter1045 : struct.foreignKeys)
 -            for (SQLForeignKey _iter1021 : struct.foreignKeys)
++            for (SQLForeignKey _iter1037 : struct.foreignKeys)
              {
-               _iter1045.write(oprot);
 -              _iter1021.write(oprot);
++              _iter1037.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57035,9 -56230,9 +57035,9 @@@
            oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
-             for (SQLUniqueConstraint _iter1046 : struct.uniqueConstraints)
 -            for (SQLUniqueConstraint _iter1022 : struct.uniqueConstraints)
++            for (SQLUniqueConstraint _iter1038 : struct.uniqueConstraints)
              {
-               _iter1046.write(oprot);
 -              _iter1022.write(oprot);
++              _iter1038.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57047,9 -56242,9 +57047,9 @@@
            oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
-             for (SQLNotNullConstraint _iter1047 : struct.notNullConstraints)
 -            for (SQLNotNullConstraint _iter1023 : struct.notNullConstraints)
++            for (SQLNotNullConstraint _iter1039 : struct.notNullConstraints)
              {
-               _iter1047.write(oprot);
 -              _iter1023.write(oprot);
++              _iter1039.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57059,9 -56254,9 +57059,9 @@@
            oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
-             for (SQLDefaultConstraint _iter1048 : struct.defaultConstraints)
 -            for (SQLDefaultConstraint _iter1024 : struct.defaultConstraints)
++            for (SQLDefaultConstraint _iter1040 : struct.defaultConstraints)
              {
-               _iter1048.write(oprot);
 -              _iter1024.write(oprot);
++              _iter1040.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57071,9 -56266,9 +57071,9 @@@
            oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
-             for (SQLCheckConstraint _iter1049 : struct.checkConstraints)
 -            for (SQLCheckConstraint _iter1025 : struct.checkConstraints)
++            for (SQLCheckConstraint _iter1041 : struct.checkConstraints)
              {
-               _iter1049.write(oprot);
 -              _iter1025.write(oprot);
++              _iter1041.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -57125,54 -56320,54 +57125,54 @@@
          if (struct.isSetPrimaryKeys()) {
            {
              oprot.writeI32(struct.primaryKeys.size());
-             for (SQLPrimaryKey _iter1050 : struct.primaryKeys)
 -            for (SQLPrimaryKey _iter1026 : struct.primaryKeys)
++            for (SQLPrimaryKey _iter1042 : struct.primaryKeys)
              {
-               _iter1050.write(oprot);
 -              _iter1026.write(oprot);
++              _iter1042.write(oprot);
              }
            }
          }
          if (struct.isSetForeignKeys()) {
            {
              oprot.writeI32(struct.foreignKeys.size());
-             for (SQLForeignKey _iter1051 : struct.foreignKeys)
 -            for (SQLForeignKey _iter1027 : struct.foreignKeys)
++            for (SQLForeignKey _iter1043 : struct.foreignKeys)
              {
-               _iter1051.write(oprot);
 -              _iter1027.write(oprot);
++              _iter1043.write(oprot);
              }
            }
          }
          if (struct.isSetUniqueConstraints()) {
            {
              oprot.writeI32(struct.uniqueConstraints.size());
-             for (SQLUniqueConstraint _iter1052 : struct.uniqueConstraints)
 -            for (SQLUniqueConstraint _iter1028 : struct.uniqueConstraints)
++            for (SQLUniqueConstraint _iter1044 : struct.uniqueConstraints)
              {
-               _iter1052.write(oprot);
 -              _iter1028.write(oprot);
++              _iter1044.write(oprot);
              }
            }
          }
          if (struct.isSetNotNullConstraints()) {
            {
              oprot.writeI32(struct.notNullConstraints.size());
-             for (SQLNotNullConstraint _iter1053 : struct.notNullConstraints)
 -            for (SQLNotNullConstraint _iter1029 : struct.notNullConstraints)
++            for (SQLNotNullConstraint _iter1045 : struct.notNullConstraints)
              {
-               _iter1053.write(oprot);
 -              _iter1029.write(oprot);
++              _iter1045.write(oprot);
              }
            }
          }
          if (struct.isSetDefaultConstraints()) {
            {
              oprot.writeI32(struct.defaultConstraints.size());
-             for (SQLDefaultConstraint _iter1054 : struct.defaultConstraints)
 -            for (SQLDefaultConstraint _iter1030 : struct.defaultConstraints)
++            for (SQLDefaultConstraint _iter1046 : struct.defaultConstraints)
              {
-               _iter1054.write(oprot);
 -              _iter1030.write(oprot);
++              _iter1046.write(oprot);
              }
            }
          }
          if (struct.isSetCheckConstraints()) {
            {
              oprot.writeI32(struct.checkConstraints.size());
-             for (SQLCheckConstraint _iter1055 : struct.checkConstraints)
 -            for (SQLCheckConstraint _iter1031 : struct.checkConstraints)
++            for (SQLCheckConstraint _iter1047 : struct.checkConstraints)
              {
-               _iter1055.write(oprot);
 -              _iter1031.write(oprot);
++              _iter1047.write(oprot);
              }
            }
          }
@@@ -57189,84 -56384,84 +57189,84 @@@
          }
          if (incoming.get(1)) {
            {
-             org.apache.thrift.protocol.TList _list1056 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1056.size);
-             SQLPrimaryKey _elem1057;
-             for (int _i1058 = 0; _i1058 < _list1056.size; ++_i1058)
 -            org.apache.thrift.protocol.TList _list1032 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1032.size);
 -            SQLPrimaryKey _elem1033;
 -            for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
++            org.apache.thrift.protocol.TList _list1048 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1048.size);
++            SQLPrimaryKey _elem1049;
++            for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050)
              {
-               _elem1057 = new SQLPrimaryKey();
-               _elem1057.read(iprot);
-               struct.primaryKeys.add(_elem1057);
 -              _elem1033 = new SQLPrimaryKey();
 -              _elem1033.read(iprot);
 -              struct.primaryKeys.add(_elem1033);
++              _elem1049 = new SQLPrimaryKey();
++              _elem1049.read(iprot);
++              struct.primaryKeys.add(_elem1049);
              }
            }
            struct.setPrimaryKeysIsSet(true);
          }
          if (incoming.get(2)) {
            {
-             org.apache.thrift.protocol.TList _list1059 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1059.size);
-             SQLForeignKey _elem1060;
-             for (int _i1061 = 0; _i1061 < _list1059.size; ++_i1061)
 -            org.apache.thrift.protocol.TList _list1035 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1035.size);
 -            SQLForeignKey _elem1036;
 -            for (int _i1037 = 0; _i1037 < _list1035.size; ++_i1037)
++            org.apache.thrift.protocol.TList _list1051 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1051.size);
++            SQLForeignKey _elem1052;
++            for (int _i1053 = 0; _i1053 < _list1051.size; ++_i1053)
              {
-               _elem1060 = new SQLForeignKey();
-               _elem1060.read(iprot);
-               struct.foreignKeys.add(_elem1060);
 -              _elem1036 = new SQLForeignKey();
 -              _elem1036.read(iprot);
 -              struct.foreignKeys.add(_elem1036);
++              _elem1052 = new SQLForeignKey();
++              _elem1052.read(iprot);
++              struct.foreignKeys.add(_elem1052);
              }
            }
            struct.setForeignKeysIsSet(true);
          }
          if (incoming.get(3)) {
            {
-             org.apache.thrift.protocol.TList _list1062 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1062.size);
-             SQLUniqueConstraint _elem1063;
-             for (int _i1064 = 0; _i1064 < _list1062.size; ++_i1064)
 -            org.apache.thrift.protocol.TList _list1038 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1038.size);
 -            SQLUniqueConstraint _elem1039;
 -            for (int _i1040 = 0; _i1040 < _list1038.size; ++_i1040)
++            org.apache.thrift.protocol.TList _list1054 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1054.size);
++            SQLUniqueConstraint _elem1055;
++            for (int _i1056 = 0; _i1056 < _list1054.size; ++_i1056)
              {
-               _elem1063 = new SQLUniqueConstraint();
-               _elem1063.read(iprot);
-               struct.uniqueConstraints.add(_elem1063);
 -              _elem1039 = new SQLUniqueConstraint();
 -              _elem1039.read(iprot);
 -              struct.uniqueConstraints.add(_elem1039);
++              _elem1055 = new SQLUniqueConstraint();
++              _elem1055.read(iprot);
++              struct.uniqueConstraints.add(_elem1055);
              }
            }
            struct.setUniqueConstraintsIsSet(true);
          }
          if (incoming.get(4)) {
            {
-             org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1065.size);
-             SQLNotNullConstraint _elem1066;
-             for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067)
 -            org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1041.size);
 -            SQLNotNullConstraint _elem1042;
 -            for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043)
++            org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1057.size);
++            SQLNotNullConstraint _elem1058;
++            for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059)
              {
-               _elem1066 = new SQLNotNullConstraint();
-               _elem1066.read(iprot);
-               struct.notNullConstraints.add(_elem1066);
 -              _elem1042 = new SQLNotNullConstraint();
 -              _elem1042.read(iprot);
 -              struct.notNullConstraints.add(_elem1042);
++              _elem1058 = new SQLNotNullConstraint();
++              _elem1058.read(iprot);
++              struct.notNullConstraints.add(_elem1058);
              }
            }
            struct.setNotNullConstraintsIsSet(true);
          }
          if (incoming.get(5)) {
            {
-             org.apache.thrift.protocol.TList _list1068 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1068.size);
-             SQLDefaultConstraint _elem1069;
-             for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070)
 -            org.apache.thrift.protocol.TList _list1044 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1044.size);
 -            SQLDefaultConstraint _elem1045;
 -            for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046)
++            org.apache.thrift.protocol.TList _list1060 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1060.size);
++            SQLDefaultConstraint _elem1061;
++            for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062)
              {
-               _elem1069 = new SQLDefaultConstraint();
-               _elem1069.read(iprot);
-               struct.defaultConstraints.add(_elem1069);
 -              _elem1045 = new SQLDefaultConstraint();
 -              _elem1045.read(iprot);
 -              struct.defaultConstraints.add(_elem1045);
++              _elem1061 = new SQLDefaultConstraint();
++              _elem1061.read(iprot);
++              struct.defaultConstraints.add(_elem1061);
              }
            }
            struct.setDefaultConstraintsIsSet(true);
          }
          if (incoming.get(6)) {
            {
-             org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1071.size);
-             SQLCheckConstraint _elem1072;
-             for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
 -            org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1047.size);
 -            SQLCheckConstraint _elem1048;
 -            for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
++            org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1063.size);
++            SQLCheckConstraint _elem1064;
++            for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
              {
-               _elem1072 = new SQLCheckConstraint();
-               _elem1072.read(iprot);
-               struct.checkConstraints.add(_elem1072);
 -              _elem1048 = new SQLCheckConstraint();
 -              _elem1048.read(iprot);
 -              struct.checkConstraints.add(_elem1048);
++              _elem1064 = new SQLCheckConstraint();
++              _elem1064.read(iprot);
++              struct.checkConstraints.add(_elem1064);
              }
            }
            struct.setCheckConstraintsIsSet(true);
@@@ -66416,13 -65611,13 +66416,13 @@@
              case 3: // PART_NAMES
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
-                   struct.partNames = new ArrayList<String>(_list1074.size);
-                   String _elem1075;
-                   for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
 -                  org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
 -                  struct.partNames = new ArrayList<String>(_list1050.size);
 -                  String _elem1051;
 -                  for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
++                  org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
++                  struct.partNames = new ArrayList<String>(_list1066.size);
++                  String _elem1067;
++                  for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
                    {
-                     _elem1075 = iprot.readString();
-                     struct.partNames.add(_elem1075);
 -                    _elem1051 = iprot.readString();
 -                    struct.partNames.add(_elem1051);
++                    _elem1067 = iprot.readString();
++                    struct.partNames.add(_elem1067);
                    }
                    iprot.readListEnd();
                  }
@@@ -66458,9 -65653,9 +66458,9 @@@
            oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
-             for (String _iter1077 : struct.partNames)
 -            for (String _iter1053 : struct.partNames)
++            for (String _iter1069 : struct.partNames)
              {
-               oprot.writeString(_iter1077);
 -              oprot.writeString(_iter1053);
++              oprot.writeString(_iter1069);
              }
              oprot.writeListEnd();
            }
@@@ -66503,9 -65698,9 +66503,9 @@@
          if (struct.isSetPartNames()) {
            {
              oprot.writeI32(struct.partNames.size());
-             for (String _iter1078 : struct.partNames)
 -            for (String _iter1054 : struct.partNames)
++            for (String _iter1070 : struct.partNames)
              {
-               oprot.writeString(_iter1078);
 -              oprot.writeString(_iter1054);
++              oprot.writeString(_iter1070);
              }
            }
          }
@@@ -66525,13 -65720,13 +66525,13 @@@
          }
          if (incoming.get(2)) {
            {
-             org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.partNames = new ArrayList<String>(_list1079.size);
-             String _elem1080;
-             for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
 -            org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.partNames = new ArrayList<String>(_list1055.size);
 -            String _elem1056;
 -            for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
++            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.partNames = new ArrayList<String>(_list1071.size);
++            String _elem1072;
++            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
              {
-               _elem1080 = iprot.readString();
-               struct.partNames.add(_elem1080);
 -              _elem1056 = iprot.readString();
 -              struct.partNames.add(_elem1056);
++              _elem1072 = iprot.readString();
++              struct.partNames.add(_elem1072);
              }
            }
            struct.setPartNamesIsSet(true);
@@@ -68588,13 -68030,13 +68588,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list1082.size);
-                   String _elem1083;
-                   for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
 -                  org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
 -                  struct.success = new ArrayList<String>(_list1066.size);
 -                  String _elem1067;
 -                  for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
++                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list1074.size);
++                  String _elem1075;
++                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
                    {
-                     _elem1083 = iprot.readString();
-                     struct.success.add(_elem1083);
 -                    _elem1067 = iprot.readString();
 -                    struct.success.add(_elem1067);
++                    _elem1075 = iprot.readString();
++                    struct.success.add(_elem1075);
                    }
                    iprot.readListEnd();
                  }
@@@ -68629,9 -68071,9 +68629,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter1085 : struct.success)
 -            for (String _iter1069 : struct.success)
++            for (String _iter1077 : struct.success)
              {
-               oprot.writeString(_iter1085);
 -              oprot.writeString(_iter1069);
++              oprot.writeString(_iter1077);
              }
              oprot.writeListEnd();
            }
@@@ -68670,9 -68112,9 +68670,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (String _iter1086 : struct.success)
 -            for (String _iter1070 : struct.success)
++            for (String _iter1078 : struct.success)
              {
-               oprot.writeString(_iter1086);
 -              oprot.writeString(_iter1070);
++              oprot.writeString(_iter1078);
              }
            }
          }
@@@ -68687,13 -68129,13 +68687,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.success = new ArrayList<String>(_list1087.size);
-             String _elem1088;
-             for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
 -            org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.success = new ArrayList<String>(_list1071.size);
 -            String _elem1072;
 -            for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
++            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list1079.size);
++            String _elem1080;
++            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
              {
-               _elem1088 = iprot.readString();
-               struct.success.add(_elem1088);
 -              _elem1072 = iprot.readString();
 -              struct.success.add(_elem1072);
++              _elem1080 = iprot.readString();
++              struct.success.add(_elem1080);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -69667,13 -68901,13 +69667,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list1090.size);
-                   String _elem1091;
-                   for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
 -                  org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
 -                  struct.success = new ArrayList<String>(_list1074.size);
 -                  String _elem1075;
 -                  for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
++                  org.apache.thrift.protocol.TList _list1082 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list1082.size);
++                  String _elem1083;
++                  for (int _i1084 = 0; _i1084 < _list1082.size; ++_i1084)
                    {
-                     _elem1091 = iprot.readString();
-                     struct.success.add(_elem1091);
 -                    _elem1075 = iprot.readString();
 -                    struct.success.add(_elem1075);
++                    _elem1083 = iprot.readString();
++                    struct.success.add(_elem1083);
                    }
                    iprot.readListEnd();
                  }
@@@ -69708,9 -68942,9 +69708,9 @@@
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter1093 : struct.success)
 -            for (String _iter1077 : struct.success)
++            for (String _iter1085 : struct.success)
              {
-               oprot.writeString(_iter1093);
 -              oprot.writeString(_iter1077);
++              oprot.writeString(_iter1085);
              }
              oprot.writeListEnd();
            }
@@@ -69749,9 -68983,9 +69749,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (String _iter1094 : struct.success)
 -            for (String _iter1078 : struct.success)
++            for (String _iter1086 : struct.success)
              {
-               oprot.writeString(_iter1094);
 -              oprot.writeString(_iter1078);
++              oprot.writeString(_iter1086);
              }
            }
          }
@@@ -69766,13 -69000,13 +69766,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.success = new ArrayList<String>(_list1095.size);
-             String _elem1096;
-             for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
 -            org.apache.thrift.protocol.TList _list1079 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.success = new ArrayList<String>(_list1079.size);
 -            String _elem1080;
 -            for (int _i1081 = 0; _i1081 < _list1079.size; ++_i1081)
++            org.apache.thrift.protocol.TList _list1087 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list1087.size);
++            String _elem1088;
++            for (int _i1089 = 0; _i1089 < _list1087.size; ++_i1089)
              {
-               _elem1096 = iprot.readString();
-               struct.success.add(_elem1096);
 -              _elem1080 = iprot.readString();
 -              struct.success.add(_elem1080);
++              _elem1088 = iprot.readString();
++              struct.success.add(_elem1088);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -70538,13 -70032,14 +70538,13 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list1098.size);
-                   String _elem1099;
-                   for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
+                   org.apache.thrift.protocol.TList _list1090 = iprot.readListBegin();
 -                  struct.success = new ArrayList<TableMeta>(_list1090.size);
 -                  TableMeta _elem1091;
++                  struct.success = new ArrayList<String>(_list1090.size);
++                  String _elem1091;
+                   for (int _i1092 = 0; _i1092 < _list1090.size; ++_i1092)
                    {
-                     _elem1099 = iprot.readString();
-                     struct.success.add(_elem1099);
 -                    _elem1091 = new TableMeta();
 -                    _elem1091.read(iprot);
++                    _elem1091 = iprot.readString();
+                     struct.success.add(_elem1091);
                    }
                    iprot.readListEnd();
                  }
@@@ -70578,10 -70073,10 +70578,10 @@@
          if (struct.success != null) {
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
 -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
 -            for (TableMeta _iter1093 : struct.success)
 +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter1101 : struct.success)
++            for (String _iter1093 : struct.success)
              {
-               oprot.writeString(_iter1101);
 -              _iter1093.write(oprot);
++              oprot.writeString(_iter1093);
              }
              oprot.writeListEnd();
            }
@@@ -70620,9 -70115,9 +70620,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (String _iter1102 : struct.success)
 -            for (TableMeta _iter1094 : struct.success)
++            for (String _iter1094 : struct.success)
              {
-               oprot.writeString(_iter1102);
 -              _iter1094.write(oprot);
++              oprot.writeString(_iter1094);
              }
            }
          }
@@@ -70637,13 -70132,14 +70637,13 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.success = new ArrayList<String>(_list1103.size);
-             String _elem1104;
-             for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
 -            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
 -            struct.success = new ArrayList<TableMeta>(_list1095.size);
 -            TableMeta _elem1096;
++            org.apache.thrift.protocol.TList _list1095 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.success = new ArrayList<String>(_list1095.size);
++            String _elem1096;
+             for (int _i1097 = 0; _i1097 < _list1095.size; ++_i1097)
              {
-               _elem1104 = iprot.readString();
-               struct.success.add(_elem1104);
 -              _elem1096 = new TableMeta();
 -              _elem1096.read(iprot);
++              _elem1096 = iprot.readString();
+               struct.success.add(_elem1096);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -71129,36 -70446,10 +71129,36 @@@
              break;
            }
            switch (schemeField.id) {
 -            case 1: // DB_NAME
 +            case 1: // DB_PATTERNS
                if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 -                struct.db_name = iprot.readString();
 -                struct.setDb_nameIsSet(true);
 +                struct.db_patterns = iprot.readString();
 +                struct.setDb_patternsIsSet(true);
 +              } else { 
 +                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +              }
 +              break;
 +            case 2: // TBL_PATTERNS
 +              if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
 +                struct.tbl_patterns = iprot.readString();
 +                struct.setTbl_patternsIsSet(true);
 +              } else { 
 +                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
 +              }
 +              break;
 +            case 3: // TBL_TYPES
 +              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 +                {
-                   org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
-                   struct.tbl_types = new ArrayList<String>(_list1106.size);
-                   String _elem1107;
-                   for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
++                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
++                  struct.tbl_types = new ArrayList<String>(_list1098.size);
++                  String _elem1099;
++                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
 +                  {
-                     _elem1107 = iprot.readString();
-                     struct.tbl_types.add(_elem1107);
++                    _elem1099 = iprot.readString();
++                    struct.tbl_types.add(_elem1099);
 +                  }
 +                  iprot.readListEnd();
 +                }
 +                struct.setTbl_typesIsSet(true);
                } else { 
                  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
                }
@@@ -71176,26 -70467,9 +71176,26 @@@
          struct.validate();
  
          oprot.writeStructBegin(STRUCT_DESC);
 -        if (struct.db_name != null) {
 -          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
 -          oprot.writeString(struct.db_name);
 +        if (struct.db_patterns != null) {
 +          oprot.writeFieldBegin(DB_PATTERNS_FIELD_DESC);
 +          oprot.writeString(struct.db_patterns);
 +          oprot.writeFieldEnd();
 +        }
 +        if (struct.tbl_patterns != null) {
 +          oprot.writeFieldBegin(TBL_PATTERNS_FIELD_DESC);
 +          oprot.writeString(struct.tbl_patterns);
 +          oprot.writeFieldEnd();
 +        }
 +        if (struct.tbl_types != null) {
 +          oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC);
 +          {
 +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size()));
-             for (String _iter1109 : struct.tbl_types)
++            for (String _iter1101 : struct.tbl_types)
 +            {
-               oprot.writeString(_iter1109);
++              oprot.writeString(_iter1101);
 +            }
 +            oprot.writeListEnd();
 +          }
            oprot.writeFieldEnd();
          }
          oprot.writeFieldStop();
@@@ -71210,63 -70484,28 +71210,63 @@@
        }
      }
  
 -    private static class get_all_tables_argsTupleScheme extends TupleScheme<get_all_tables_args> {
 +    private static class get_table_meta_argsTupleScheme extends TupleScheme<get_table_meta_args> {
  
        @Override
 -      public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException {
 +      public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException {
          TTupleProtocol oprot = (TTupleProtocol) prot;
          BitSet optionals = new BitSet();
 -        if (struct.isSetDb_name()) {
 +        if (struct.isSetDb_patterns()) {
            optionals.set(0);
          }
 -        oprot.writeBitSet(optionals, 1);
 -        if (struct.isSetDb_name()) {
 -          oprot.writeString(struct.db_name);
 +        if (struct.isSetTbl_patterns()) {
 +          optionals.set(1);
 +        }
 +        if (struct.isSetTbl_types()) {
 +          optionals.set(2);
 +        }
 +        oprot.writeBitSet(optionals, 3);
 +        if (struct.isSetDb_patterns()) {
 +          oprot.writeString(struct.db_patterns);
 +        }
 +        if (struct.isSetTbl_patterns()) {
 +          oprot.writeString(struct.tbl_patterns);
 +        }
 +        if (struct.isSetTbl_types()) {
 +          {
 +            oprot.writeI32(struct.tbl_types.size());
-             for (String _iter1110 : struct.tbl_types)
++            for (String _iter1102 : struct.tbl_types)
 +            {
-               oprot.writeString(_iter1110);
++              oprot.writeString(_iter1102);
 +            }
 +          }
          }
        }
  
        @Override
 -      public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_args struct) throws org.apache.thrift.TException {
 +      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args struct) throws org.apache.thrift.TException {
          TTupleProtocol iprot = (TTupleProtocol) prot;
 -        BitSet incoming = iprot.readBitSet(1);
 +        BitSet incoming = iprot.readBitSet(3);
          if (incoming.get(0)) {
 -          struct.db_name = iprot.readString();
 -          struct.setDb_nameIsSet(true);
 +          struct.db_patterns = iprot.readString();
 +          struct.setDb_patternsIsSet(true);
 +        }
 +        if (incoming.get(1)) {
 +          struct.tbl_patterns = iprot.readString();
 +          struct.setTbl_patternsIsSet(true);
 +        }
 +        if (incoming.get(2)) {
 +          {
-             org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-             struct.tbl_types = new ArrayList<String>(_list1111.size);
-             String _elem1112;
-             for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
++            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
++            struct.tbl_types = new ArrayList<String>(_list1103.size);
++            String _elem1104;
++            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
 +            {
-               _elem1112 = iprot.readString();
-               struct.tbl_types.add(_elem1112);
++              _elem1104 = iprot.readString();
++              struct.tbl_types.add(_elem1104);
 +            }
 +          }
 +          struct.setTbl_typesIsSet(true);
          }
        }
      }
@@@ -71669,14 -70905,13 +71669,14 @@@
              case 0: // SUCCESS
                if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                  {
-                   org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
-                   struct.success = new ArrayList<TableMeta>(_list1114.size);
-                   TableMeta _elem1115;
-                   for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
 -                  org.apache.thrift.protocol.TList _list1098 = iprot.readListBegin();
 -                  struct.success = new ArrayList<String>(_list1098.size);
 -                  String _elem1099;
 -                  for (int _i1100 = 0; _i1100 < _list1098.size; ++_i1100)
++                  org.apache.thrift.protocol.TList _list1106 = iprot.readListBegin();
++                  struct.success = new ArrayList<TableMeta>(_list1106.size);
++                  TableMeta _elem1107;
++                  for (int _i1108 = 0; _i1108 < _list1106.size; ++_i1108)
                    {
-                     _elem1115 = new TableMeta();
-                     _elem1115.read(iprot);
-                     struct.success.add(_elem1115);
 -                    _elem1099 = iprot.readString();
 -                    struct.success.add(_elem1099);
++                    _elem1107 = new TableMeta();
++                    _elem1107.read(iprot);
++                    struct.success.add(_elem1107);
                    }
                    iprot.readListEnd();
                  }
@@@ -71710,10 -70945,10 +71710,10 @@@
          if (struct.success != null) {
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
            {
 -            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
 -            for (String _iter1101 : struct.success)
 +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-             for (TableMeta _iter1117 : struct.success)
++            for (TableMeta _iter1109 : struct.success)
              {
-               _iter1117.write(oprot);
 -              oprot.writeString(_iter1101);
++              _iter1109.write(oprot);
              }
              oprot.writeListEnd();
            }
@@@ -71752,9 -70987,9 +71752,9 @@@
          if (struct.isSetSuccess()) {
            {
              oprot.writeI32(struct.success.size());
-             for (TableMeta _iter1118 : struct.success)
 -            for (String _iter1102 : struct.success)
++            for (TableMeta _iter1110 : struct.success)
              {
-               _iter1118.write(oprot);
 -              oprot.writeString(_iter1102);
++              _iter1110.write(oprot);
              }
            }
          }
@@@ -71769,14 -71004,13 +71769,14 @@@
          BitSet incoming = iprot.readBitSet(2);
          if (incoming.get(0)) {
            {
-             org.apache.thrift.protocol.TList _list1119 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-             struct.success = new ArrayList<TableMeta>(_list1119.size);
-             TableMeta _elem1120;
-             for (int _i1121 = 0; _i1121 < _list1119.size; ++_i1121)
 -            org.apache.thrift.protocol.TList _list1103 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
 -            struct.success = new ArrayList<String>(_list1103.size);
 -            String _elem1104;
 -            for (int _i1105 = 0; _i1105 < _list1103.size; ++_i1105)
++            org.apache.thrift.protocol.TList _list1111 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++            struct.success = new ArrayList<TableMeta>(_list1111.size);
++            TableMeta _elem1112;
++            for (int _i1113 = 0; _i1113 < _list1111.size; ++_i1113)
              {
-               _elem1120 = new TableMeta();
-               _elem1120.read(iprot);
-               struct.success.add(_elem1120);
 -              _elem1104 = iprot.readString();
 -              struct.success.add(_elem1104);
++              _elem1112 = new TableMeta();
++              _elem1112.read(iprot);
++              struct.success.add(_elem1112);
              }
            }
            struct.setSuccessIsSet(true);
@@@ -72540,18 -71945,9 +72540,18 @@@
            }
            switch (schemeField.id) {
              case 0: // SUCCESS
 -              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
 -                struct.success = new Table();
 -                struct.success.read(iprot);
 +              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
 +                {
-                   org.apache.thrift.protocol.TList _list1122 = iprot.readListBegin();
-                   struct.success = new ArrayList<String>(_list1122.size);
-                   String _elem1123;
-                   for (int _i1124 = 0; _i1124 < _list1122.size; ++_i1124)
++                  org.apache.thrift.protocol.TList _list1114 = iprot.readListBegin();
++                  struct.success = new ArrayList<String>(_list1114.size);
++                  String _elem1115;
++                  for (int _i1116 = 0; _i1116 < _list1114.size; ++_i1116)
 +                  {
-                     _elem1123 = iprot.readString();
-                     struct.success.add(_elem1123);
++                    _elem1115 = iprot.readString();
++                    struct.success.add(_elem1115);
 +                  }
 +                  iprot.readListEnd();
 +                }
                  struct.setSuccessIsSet(true);
                } else { 
                  org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
@@@ -72581,14 -71986,7 +72581,14 @@@
          oprot.writeStructBegin(STRUCT_DESC);
          if (struct.success != null) {
            oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
 -          struct.success.write(oprot);
 +          {
 +            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-             for (String _iter1125 : struct.success)
++            for (String _iter1117 : struct.success)
 +            {
-               oprot.writeString(_iter1125);
++              oprot.writeString(_iter1117);
 +            }
 +            oprot.writeListEnd();
 +          }
            oprot.writeFieldEnd();
          }
          if (struct.o1 != null) {
@@@ -72620,36 -72023,28 +72620,36 @@@
          if (struct.isSetO1()) {
            optionals.set(1);
          }
 -        if (struct.isSetO2()) {
 -          optionals.set(2);
 -        }
 -        oprot.writeBitSet(optionals, 3);
 +        oprot.writeBitSet(optionals, 2);
          if (struct.isSetSuccess()) {
 -          struct.success.write(oprot);
 -        }
 +          {
 +            oprot.writeI32(struct.success.size());
-             for (String _iter1126 : struct.success)
++            for (String _iter1118 : struct.success)
 +            {
-               oprot.writeString(_iter1126);
++              oprot.writeString(_iter1118);
 +            }
 +          }
 +        }
          if (struct.isSetO1()) {
            struct.o1.write(oprot);
          }
        }
  
        @Override
 -      public void read(org.apache.thrift.protocol.TProtocol prot, get_table_result struct) throws org.

<TRUNCATED>

[02/48] hive git commit: HIVE-20111: HBase-Hive (managed) table creation fails with strict managed table checks: Table is marked as a managed table but is not transactional (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)

Posted by se...@apache.org.
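
The test-output diffs below reflect the strict managed-table checks: HBase-backed Hive tables are now declared EXTERNAL, and 'external.table.purge' = 'true' is added so that dropping the Hive table still cleans up the backing HBase table, as the old managed tables did. A minimal sketch of the resulting DDL shape, using an illustrative table name rather than one taken from the patch:

-- external table over HBase; the purge property preserves drop-time cleanup
create external table hb_example(key int, val string)
stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
tblproperties ('hbase.table.name' = 'hb_example', 'external.table.purge' = 'true');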
http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out b/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out
index 6bd4f73..aa9a5ac 100644
Binary files a/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out and b/hbase-handler/src/test/results/positive/hbase_decimal_decimal.q.out differ

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out b/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out
index de1c139..0b8c853 100644
--- a/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out
@@ -2,17 +2,17 @@ PREHOOK: query: drop table if exists hb_target
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: drop table if exists hb_target
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: create table hb_target(key int, val string)
+PREHOOK: query: create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk')
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hb_target
-POSTHOOK: query: create table hb_target(key int, val string)
+POSTHOOK: query: create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk')
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hb_target
@@ -33,17 +33,17 @@ POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@hb_target
 POSTHOOK: Output: default@hb_target
 #### A masked pattern was here ####
-PREHOOK: query: create table hb_target(key int, val string)
+PREHOOK: query: create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk')
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hb_target
-POSTHOOK: query: create table hb_target(key int, val string)
+POSTHOOK: query: create external table hb_target(key int, val string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ('hbase.columns.mapping' = ':key,cf:val')
-tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk')
+tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk', 'external.table.purge' = 'true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hb_target

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_joins.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_joins.q.out b/hbase-handler/src/test/results/positive/hbase_joins.q.out
index 697675e..a591c4e 100644
--- a/hbase-handler/src/test/results/positive/hbase_joins.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_joins.q.out
@@ -14,51 +14,57 @@ PREHOOK: query: DROP TABLE users_level
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE users_level
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int)
+PREHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@users
-POSTHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int)
+POSTHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@users
-PREHOOK: query: CREATE TABLE states(key string, name string)
+PREHOOK: query: CREATE EXTERNAL TABLE states(key string, name string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "state:name"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@states
-POSTHOOK: query: CREATE TABLE states(key string, name string)
+POSTHOOK: query: CREATE EXTERNAL TABLE states(key string, name string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "state:name"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@states
-PREHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int)
+PREHOOK: query: CREATE EXTERNAL TABLE countries(key string, name string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:name,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@countries
-POSTHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int)
+POSTHOOK: query: CREATE EXTERNAL TABLE countries(key string, name string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:name,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@countries
@@ -218,27 +224,31 @@ POSTHOOK: query: DROP TABLE countries
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@countries
 POSTHOOK: Output: default@countries
-PREHOOK: query: CREATE TABLE users(key int, userid int, username string, created int) 
+PREHOOK: query: CREATE EXTERNAL TABLE users(key int, userid int, username string, created int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:nickname,f:created")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@users
-POSTHOOK: query: CREATE TABLE users(key int, userid int, username string, created int) 
+POSTHOOK: query: CREATE EXTERNAL TABLE users(key int, userid int, username string, created int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:nickname,f:created")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@users
-PREHOOK: query: CREATE TABLE users_level(key int, userid int, level int)
+PREHOOK: query: CREATE EXTERNAL TABLE users_level(key int, userid int, level int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:level")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@users_level
-POSTHOOK: query: CREATE TABLE users_level(key int, userid int, level int)
+POSTHOOK: query: CREATE EXTERNAL TABLE users_level(key int, userid int, level int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:level")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@users_level

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_null_first_col.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_null_first_col.q.out b/hbase-handler/src/test/results/positive/hbase_null_first_col.q.out
index bb4491b..47f56fc 100644
--- a/hbase-handler/src/test/results/positive/hbase_null_first_col.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_null_first_col.q.out
@@ -22,19 +22,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/null.txt' INTO TABLE s
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@src_null
-PREHOOK: query: CREATE TABLE hbase_null(key string, col1 string, col2 string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_null(key string, col1 string, col2 string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf1:c1,cf1:c2"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_null
-POSTHOOK: query: CREATE TABLE hbase_null(key string, col1 string, col2 string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_null(key string, col1 string, col2 string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf1:c1,cf1:c2"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_null

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out
index 0744caf..47327e5 100644
--- a/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out
@@ -14,17 +14,19 @@ PREHOOK: query: drop view if exists hive1_view_data_hbase2
 PREHOOK: type: DROPVIEW
 POSTHOOK: query: drop view if exists hive1_view_data_hbase2
 POSTHOOK: type: DROPVIEW
-PREHOOK: query: CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+PREHOOK: query: CREATE EXTERNAL TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hive1_tbl_data_hbase1
-POSTHOOK: query: CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hive1_tbl_data_hbase1
@@ -64,17 +66,19 @@ POSTHOOK: query: insert into table hive1_tbl_data_hbase1 select '00001','john','
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@hive1_tbl_data_hbase1
-PREHOOK: query: CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+PREHOOK: query: CREATE EXTERNAL TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hive1_tbl_data_hbase2
-POSTHOOK: query: CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' 
 WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" 
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hive1_tbl_data_hbase2

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
index 75294ed..4d44d1d 100644
--- a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out
@@ -1,12 +1,14 @@
-PREHOOK: query: CREATE TABLE hbase_pushdown(key string, value string) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key string, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_pushdown
-POSTHOOK: query: CREATE TABLE hbase_pushdown(key string, value string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key string, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_pushdown

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
index 2816164..909a5fa 100644
--- a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out
@@ -1,12 +1,14 @@
-PREHOOK: query: CREATE TABLE hbase_pushdown(key int, value string) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_pushdown
-POSTHOOK: query: CREATE TABLE hbase_pushdown(key int, value string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_pushdown

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_queries.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_queries.q.out b/hbase-handler/src/test/results/positive/hbase_queries.q.out
index 9fe9cab..c0184ba 100644
--- a/hbase-handler/src/test/results/positive/hbase_queries.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_queries.q.out
@@ -2,17 +2,17 @@ PREHOOK: query: DROP TABLE hbase_table_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_1
-POSTHOOK: query: CREATE TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_1(key int comment 'It is a column key', value string comment 'It is the column string value')
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
-TBLPROPERTIES ("hbase.table.name" = "hbase_table_0")
+TBLPROPERTIES ("hbase.table.name" = "hbase_table_0", "external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_1
@@ -395,15 +395,17 @@ PREHOOK: query: DROP TABLE empty_hbase_table
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE empty_hbase_table
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE empty_hbase_table(key int, value string) 
+PREHOOK: query: CREATE EXTERNAL TABLE empty_hbase_table(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@empty_hbase_table
-POSTHOOK: query: CREATE TABLE empty_hbase_table(key int, value string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE empty_hbase_table(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = "cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@empty_hbase_table
@@ -467,19 +469,21 @@ POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 155
 500
-PREHOOK: query: CREATE TABLE hbase_table_3(key int, value string, count int) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_3(key int, value string, count int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:val,cf2:count"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_3
-POSTHOOK: query: CREATE TABLE hbase_table_3(key int, value string, count int) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_3(key int, value string, count int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:val,cf2:count"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_3
@@ -666,19 +670,21 @@ PREHOOK: query: DROP TABLE hbase_table_4
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_4
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_4
-POSTHOOK: query: CREATE TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_4(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_4
@@ -734,19 +740,21 @@ PREHOOK: query: DROP TABLE hbase_table_6
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_6
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_6(key int, value map<string,string>) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_6(key int, value map<string,string>) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_6
-POSTHOOK: query: CREATE TABLE hbase_table_6(key int, value map<string,string>) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_6(key int, value map<string,string>) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = ":key,cf:"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_6
@@ -774,19 +782,21 @@ PREHOOK: query: DROP TABLE hbase_table_7
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_7
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_7(value map<string,string>, key int) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_7(value map<string,string>, key int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:,:key"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_7
-POSTHOOK: query: CREATE TABLE hbase_table_7(value map<string,string>, key int) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_7(value map<string,string>, key int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "cf:,:key"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_7
@@ -816,19 +826,21 @@ PREHOOK: query: DROP TABLE hbase_table_8
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table_8
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_8
-POSTHOOK: query: CREATE TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_8(key int, value1 string, value2 int, value3 int) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "a:b,a:c,d:e"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_8
@@ -958,15 +970,17 @@ PREHOOK: query: DROP TABLE IF EXISTS hbase_table_9
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS hbase_table_9
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col#s:s,cf:str_col")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_9
-POSTHOOK: query: CREATE TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_9 (id bigint, data map<string, string>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col#s:s,cf:str_col")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_9
@@ -1027,15 +1041,17 @@ PREHOOK: query: DROP TABLE IF EXISTS hbase_table_10
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS hbase_table_10
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_10
-POSTHOOK: query: CREATE TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_10 (id bigint, data map<int, int>, str string)
 stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 with serdeproperties ("hbase.columns.mapping" = ":key,cf:map_col2,cf:str2_col")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_10
@@ -1096,15 +1112,17 @@ PREHOOK: query: DROP TABLE IF EXISTS hbase_table_11
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS hbase_table_11
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_11
-POSTHOOK: query: CREATE TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_11(id INT, map_column STRUCT<s_int:INT,s_string:STRING,s_date:DATE>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_11
@@ -1129,15 +1147,17 @@ PREHOOK: query: DROP TABLE IF EXISTS hbase_table_12
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS hbase_table_12
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table_12
-POSTHOOK: query: CREATE TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table_12(id INT, list_column ARRAY <STRING>)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ('hbase.columns.mapping'=':key,id:id')
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table_12

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_scan_params.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_scan_params.q.out b/hbase-handler/src/test/results/positive/hbase_scan_params.q.out
index 1ccaba0..5ae8b1a 100644
--- a/hbase-handler/src/test/results/positive/hbase_scan_params.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_scan_params.q.out
@@ -1,14 +1,16 @@
-PREHOOK: query: CREATE TABLE hbase_pushdown(key int, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string",
 "hbase.scan.cache" = "500", "hbase.scan.cacheblocks" = "true", "hbase.scan.batch" = "1")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_pushdown
-POSTHOOK: query: CREATE TABLE hbase_pushdown(key int, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_pushdown(key int, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string",
 "hbase.scan.cache" = "500", "hbase.scan.cacheblocks" = "true", "hbase.scan.batch" = "1")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_pushdown

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
index 86a9fea..7982f22 100644
--- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out
@@ -6,15 +6,17 @@ POSTHOOK: query: CREATE TABLE src_x1(key string, value string)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_x1
-PREHOOK: query: CREATE TABLE src_x2(key string, value string)
+PREHOOK: query: CREATE EXTERNAL TABLE src_x2(key string, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:value")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@src_x2
-POSTHOOK: query: CREATE TABLE src_x2(key string, value string)
+POSTHOOK: query: CREATE EXTERNAL TABLE src_x2(key string, value string)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key, cf:value")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@src_x2

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
index 6e7d7e6..9318cd7 100644
--- a/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_timestamp.q.out
@@ -2,15 +2,17 @@ PREHOOK: query: DROP TABLE hbase_table
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE hbase_table
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` timestamp)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` timestamp)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` timestamp)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` timestamp)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table
@@ -69,15 +71,17 @@ POSTHOOK: query: DROP TABLE hbase_table
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@hbase_table
 POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table
@@ -125,15 +129,17 @@ POSTHOOK: query: DROP TABLE hbase_table
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@hbase_table
 POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table (key string, value string, `time` bigint)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table (key string, value string, `time` bigint)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:string,:timestamp")
+  TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table
@@ -363,15 +369,17 @@ POSTHOOK: query: DROP TABLE hbase_table
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@hbase_table
 POSTHOOK: Output: default@hbase_table
-PREHOOK: query: CREATE TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_table
-POSTHOOK: query: CREATE TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_table(key string, value map<string, string>, `time` timestamp)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf:,:timestamp")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_table

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out
index a2a2f56..8156724 100644
--- a/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out
@@ -1,14 +1,14 @@
-PREHOOK: query: create table hbase_str(rowkey string,mytime string,mystr string)
+PREHOOK: query: create external table hbase_str(rowkey string,mytime string,mystr string)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ('hbase.columns.mapping' = 'm:mytime,m:mystr')
-  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts')
+  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts', 'external.table.purge' = 'true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_str
-POSTHOOK: query: create table hbase_str(rowkey string,mytime string,mystr string)
+POSTHOOK: query: create external table hbase_str(rowkey string,mytime string,mystr string)
   STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
   WITH SERDEPROPERTIES ('hbase.columns.mapping' = 'm:mytime,m:mystr')
-  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts')
+  TBLPROPERTIES ('hbase.table.name' = 'hbase_ts', 'external.table.purge' = 'true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_str

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbase_viewjoins.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbase_viewjoins.q.out b/hbase-handler/src/test/results/positive/hbase_viewjoins.q.out
index 95fcaa0..9eae99d 100644
--- a/hbase-handler/src/test/results/positive/hbase_viewjoins.q.out
+++ b/hbase-handler/src/test/results/positive/hbase_viewjoins.q.out
@@ -14,7 +14,7 @@ PREHOOK: query: DROP TABLE IF EXISTS HBASE_TABLE_TEST_1
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE IF EXISTS HBASE_TABLE_TEST_1
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE HBASE_TABLE_TEST_1(
+PREHOOK: query: CREATE EXTERNAL TABLE HBASE_TABLE_TEST_1(
   cvalue string ,
   pk string,
  ccount int   )
@@ -29,11 +29,12 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_1',
-  'serialization.null.format'=''  )
+  'serialization.null.format'='',
+  'external.table.purge' = 'true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@HBASE_TABLE_TEST_1
-POSTHOOK: query: CREATE TABLE HBASE_TABLE_TEST_1(
+POSTHOOK: query: CREATE EXTERNAL TABLE HBASE_TABLE_TEST_1(
   cvalue string ,
   pk string,
  ccount int   )
@@ -48,7 +49,8 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_1',
-  'serialization.null.format'=''  )
+  'serialization.null.format'='',
+  'external.table.purge' = 'true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@HBASE_TABLE_TEST_1
@@ -65,7 +67,7 @@ POSTHOOK: Output: default@VIEW_HBASE_TABLE_TEST_1
 POSTHOOK: Lineage: VIEW_HBASE_TABLE_TEST_1.ccount SIMPLE [(hbase_table_test_1)hbase_table_test_1.FieldSchema(name:ccount, type:int, comment:), ]
 POSTHOOK: Lineage: VIEW_HBASE_TABLE_TEST_1.cvalue SIMPLE [(hbase_table_test_1)hbase_table_test_1.FieldSchema(name:cvalue, type:string, comment:), ]
 POSTHOOK: Lineage: VIEW_HBASE_TABLE_TEST_1.pk SIMPLE [(hbase_table_test_1)hbase_table_test_1.FieldSchema(name:pk, type:string, comment:), ]
-PREHOOK: query: CREATE TABLE HBASE_TABLE_TEST_2(
+PREHOOK: query: CREATE EXTERNAL TABLE HBASE_TABLE_TEST_2(
   cvalue string ,
     pk string ,
    ccount int  )
@@ -80,11 +82,12 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_2',
-  'serialization.null.format'='')
+  'serialization.null.format'='',
+  'external.table.purge' = 'true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@HBASE_TABLE_TEST_2
-POSTHOOK: query: CREATE TABLE HBASE_TABLE_TEST_2(
+POSTHOOK: query: CREATE EXTERNAL TABLE HBASE_TABLE_TEST_2(
   cvalue string ,
     pk string ,
    ccount int  )
@@ -99,7 +102,8 @@ WITH SERDEPROPERTIES (
   'serialization.format'='1')
 TBLPROPERTIES (
   'hbase.table.name'='hbase_table_test_2',
-  'serialization.null.format'='')
+  'serialization.null.format'='',
+  'external.table.purge' = 'true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@HBASE_TABLE_TEST_2

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/hbasestats.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/hbasestats.q.out b/hbase-handler/src/test/results/positive/hbasestats.q.out
index e206191..b6737b7 100644
--- a/hbase-handler/src/test/results/positive/hbasestats.q.out
+++ b/hbase-handler/src/test/results/positive/hbasestats.q.out
@@ -2,19 +2,21 @@ PREHOOK: query: DROP TABLE users
 PREHOOK: type: DROPTABLE
 POSTHOOK: query: DROP TABLE users
 POSTHOOK: type: DROPTABLE
-PREHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int)
+PREHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@users
-POSTHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int)
+POSTHOOK: query: CREATE EXTERNAL TABLE users(key string, state string, country string, country_id int)
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES (
 "hbase.columns.mapping" = "info:state,info:country,info:country_id"
 )
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@users
@@ -35,10 +37,12 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"country\":\"true\",\"country_id\":\"true\",\"key\":\"true\",\"state\":\"true\"}}
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 	numFiles            	0                   
 	numRows             	0                   
 	rawDataSize         	0                   
@@ -124,9 +128,11 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 #### A masked pattern was here ####
 	numFiles            	0                   
 	numRows             	0                   
@@ -189,9 +195,11 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 #### A masked pattern was here ####
 	numFiles            	0                   
 	numRows             	0                   
@@ -245,10 +253,12 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
 	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 #### A masked pattern was here ####
 	numFiles            	0                   
 	numRows             	2                   
@@ -349,9 +359,11 @@ Database:           	default
 #### A masked pattern was here ####
 Retention:          	0                   	 
 #### A masked pattern was here ####
-Table Type:         	MANAGED_TABLE       	 
+Table Type:         	EXTERNAL_TABLE      	 
 Table Parameters:	 	 
+	EXTERNAL            	TRUE                
 	bucketing_version   	2                   
+	external.table.purge	true                
 #### A masked pattern was here ####
 	numFiles            	0                   
 	numRows             	2                   

http://git-wip-us.apache.org/repos/asf/hive/blob/3b88d6c1/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
----------------------------------------------------------------------
diff --git a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
index 18441e8..8112046 100644
--- a/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
+++ b/hbase-handler/src/test/results/positive/ppd_key_ranges.q.out
@@ -1,12 +1,14 @@
-PREHOOK: query: CREATE TABLE hbase_ppd_keyrange(key int, value string) 
+PREHOOK: query: CREATE EXTERNAL TABLE hbase_ppd_keyrange(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#binary,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@hbase_ppd_keyrange
-POSTHOOK: query: CREATE TABLE hbase_ppd_keyrange(key int, value string) 
+POSTHOOK: query: CREATE EXTERNAL TABLE hbase_ppd_keyrange(key int, value string) 
 STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
 WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key#binary,cf:string")
+TBLPROPERTIES ("external.table.purge" = "true")
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@hbase_ppd_keyrange